Add const qualifiers to CodeGen's use of LLVM IR constructs.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@101334 91177308-0d34-0410-b5e6-96231b3b80d8
Dan Gohman 2010-04-15 01:51:59 +00:00
parent cff6f85454
commit 46510a73e9
64 changed files with 577 additions and 557 deletions
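
The pattern is uniform across the patch: CodeGen only inspects the IR it is handed, so the pointers it stores and passes around become const-qualified. LLVM's casting templates (cast<>, dyn_cast<>) propagate const, so most call sites only need their local pointer types updated. A minimal sketch of the idiom (not part of the patch; assumes the 2010-era header layout):

#include "llvm/Constants.h"        // pre-llvm/IR header layout of this era
#include "llvm/Support/Casting.h"
using namespace llvm;

static bool isSmallIntConstant(const Value *V) {
  // dyn_cast<> on a 'const Value *' yields a 'const ConstantInt *',
  // so read-only helpers can stay const end to end.
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().getActiveBits() <= 64;
  return false;
}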

@ -87,28 +87,28 @@ public:
/// LLVM IR instruction, and append generated machine instructions to
/// the current block. Return true if selection was successful.
///
bool SelectInstruction(Instruction *I);
bool SelectInstruction(const Instruction *I);
/// SelectOperator - Do "fast" instruction selection for the given
/// LLVM IR operator (Instruction or ConstantExpr), and append
/// generated machine instructions to the current block. Return true
/// if selection was successful.
///
bool SelectOperator(User *I, unsigned Opcode);
bool SelectOperator(const User *I, unsigned Opcode);
/// getRegForValue - Create a virtual register and arrange for it to
/// be assigned the value for the given LLVM value.
unsigned getRegForValue(Value *V);
unsigned getRegForValue(const Value *V);
/// lookUpRegForValue - Look up the value to see if its value is already
/// cached in a register. It may be defined by instructions across blocks or
/// defined locally.
unsigned lookUpRegForValue(Value *V);
unsigned lookUpRegForValue(const Value *V);
/// getRegForGEPIndex - This is a wrapper around getRegForValue that also
/// takes care of truncating or sign-extending the given getelementptr
/// index value.
unsigned getRegForGEPIndex(Value *V);
unsigned getRegForGEPIndex(const Value *V);
virtual ~FastISel();
@ -128,7 +128,7 @@ protected:
/// fit into FastISel's framework. It returns true if it was successful.
///
virtual bool
TargetSelectInstruction(Instruction *I) = 0;
TargetSelectInstruction(const Instruction *I) = 0;
/// FastEmit_r - This method is called by target-independent code
/// to request that an instruction with the given type and opcode
@ -170,7 +170,7 @@ protected:
virtual unsigned FastEmit_rf(MVT VT,
MVT RetVT,
unsigned Opcode,
unsigned Op0, ConstantFP *FPImm);
unsigned Op0, const ConstantFP *FPImm);
/// FastEmit_rri - This method is called by target-independent code
/// to request that an instruction with the given type, opcode, and
@ -196,7 +196,7 @@ protected:
/// FastEmit_rr instead.
unsigned FastEmit_rf_(MVT VT,
unsigned Opcode,
unsigned Op0, ConstantFP *FPImm,
unsigned Op0, const ConstantFP *FPImm,
MVT ImmType);
/// FastEmit_i - This method is called by target-independent code
@ -213,7 +213,7 @@ protected:
virtual unsigned FastEmit_f(MVT VT,
MVT RetVT,
unsigned Opcode,
ConstantFP *FPImm);
const ConstantFP *FPImm);
/// FastEmitInst_ - Emit a MachineInstr with no operands and a
/// result register in the given register class.
@ -247,7 +247,7 @@ protected:
///
unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, ConstantFP *FPImm);
unsigned Op0, const ConstantFP *FPImm);
/// FastEmitInst_rri - Emit a MachineInstr with two register operands,
/// an immediate, and a result register in the given register class.
@ -277,34 +277,34 @@ protected:
/// the CFG.
void FastEmitBranch(MachineBasicBlock *MBB);
unsigned UpdateValueMap(Value* I, unsigned Reg);
unsigned UpdateValueMap(const Value* I, unsigned Reg);
unsigned createResultReg(const TargetRegisterClass *RC);
/// TargetMaterializeConstant - Emit a constant in a register using
/// target-specific logic, such as constant pool loads.
virtual unsigned TargetMaterializeConstant(Constant* C) {
virtual unsigned TargetMaterializeConstant(const Constant* C) {
return 0;
}
/// TargetMaterializeAlloca - Emit an alloca address in a register using
/// target-specific logic.
virtual unsigned TargetMaterializeAlloca(AllocaInst* C) {
virtual unsigned TargetMaterializeAlloca(const AllocaInst* C) {
return 0;
}
private:
bool SelectBinaryOp(User *I, unsigned ISDOpcode);
bool SelectBinaryOp(const User *I, unsigned ISDOpcode);
bool SelectFNeg(User *I);
bool SelectFNeg(const User *I);
bool SelectGetElementPtr(User *I);
bool SelectGetElementPtr(const User *I);
bool SelectCall(User *I);
bool SelectCall(const User *I);
bool SelectBitCast(User *I);
bool SelectBitCast(const User *I);
bool SelectCast(User *I, unsigned Opcode);
bool SelectCast(const User *I, unsigned Opcode);
};
}
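
With these signatures a selector loop can walk a basic block through const_iterators and hand const pointers straight to FastISel. A rough usage sketch (the helper name is made up; includes assume the 2010 tree layout):

#include "llvm/BasicBlock.h"
#include "llvm/CodeGen/FastISel.h"
using namespace llvm;

// Hypothetical helper: select every instruction of BB with FastISel,
// stopping at the first one it cannot handle.
static void fastSelectBlock(FastISel &FastIS, const BasicBlock *BB) {
  for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (!FastIS.SelectInstruction(&*I))   // takes 'const Instruction *' now
      break;
}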

@ -68,9 +68,9 @@ namespace llvm {
struct GCRoot {
int Num; //< Usually a frame index.
int StackOffset; //< Offset from the stack pointer.
Constant *Metadata; //< Metadata straight from the call to llvm.gcroot.
const Constant *Metadata;//< Metadata straight from the call to llvm.gcroot.
GCRoot(int N, Constant *MD) : Num(N), StackOffset(-1), Metadata(MD) {}
GCRoot(int N, const Constant *MD) : Num(N), StackOffset(-1), Metadata(MD) {}
};
@ -114,7 +114,7 @@ namespace llvm {
/// addStackRoot - Registers a root that lives on the stack. Num is the
/// stack object ID for the alloca (if the code generator is
// using MachineFrameInfo).
void addStackRoot(int Num, Constant *Metadata) {
void addStackRoot(int Num, const Constant *Metadata) {
Roots.push_back(GCRoot(Num, Metadata));
}
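
A sketch of the intended use (the helper is hypothetical): a GC lowering pass that records a stack root only reads the llvm.gcroot metadata constant, so it can now pass it through as const.

#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/Constants.h"
using namespace llvm;

// Hypothetical helper: register the alloca's frame index together with the
// (read-only) metadata constant taken from the llvm.gcroot call.
static void recordRoot(GCFunctionInfo &FI, int FrameIdx,
                       const Value *MetadataOperand) {
  FI.addStackRoot(FrameIdx, cast<Constant>(MetadataOperand));
}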

@ -74,7 +74,7 @@ class MachineConstantPoolEntry {
public:
/// The constant itself.
union {
Constant *ConstVal;
const Constant *ConstVal;
MachineConstantPoolValue *MachineCPVal;
} Val;
@ -82,7 +82,7 @@ public:
/// a MachineConstantPoolValue.
unsigned Alignment;
MachineConstantPoolEntry(Constant *V, unsigned A)
MachineConstantPoolEntry(const Constant *V, unsigned A)
: Alignment(A) {
Val.ConstVal = V;
}
@ -143,7 +143,7 @@ public:
/// getConstantPoolIndex - Create a new entry in the constant pool or return
/// an existing one. User must specify the minimum required alignment for
/// the object.
unsigned getConstantPoolIndex(Constant *C, unsigned Alignment);
unsigned getConstantPoolIndex(const Constant *C, unsigned Alignment);
unsigned getConstantPoolIndex(MachineConstantPoolValue *V,unsigned Alignment);
/// isEmpty - Return true if this constant pool contains no constants.

@ -104,7 +104,7 @@ public:
return *this;
}
const MachineInstrBuilder &addGlobalAddress(GlobalValue *GV,
const MachineInstrBuilder &addGlobalAddress(const GlobalValue *GV,
int64_t Offset = 0,
unsigned char TargetFlags = 0) const {
MI->addOperand(MachineOperand::CreateGA(GV, Offset, TargetFlags));

@ -82,7 +82,7 @@ struct LandingPadInfo {
SmallVector<MCSymbol*, 1> BeginLabels; // Labels prior to invoke.
SmallVector<MCSymbol*, 1> EndLabels; // Labels after invoke.
MCSymbol *LandingPadLabel; // Label at beginning of landing pad.
Function *Personality; // Personality function.
const Function *Personality; // Personality function.
std::vector<int> TypeIds; // List of type ids (filters negative)
explicit LandingPadInfo(MachineBasicBlock *MBB)
@ -101,7 +101,7 @@ class MachineModuleInfo : public ImmutablePass {
MCContext Context;
/// TheModule - This is the LLVM Module being worked on.
Module *TheModule;
const Module *TheModule;
/// ObjFileMMI - This is the object-file-format-specific implementation of
/// MachineModuleInfoImpl, which lets targets accumulate whatever info they
@ -125,7 +125,7 @@ class MachineModuleInfo : public ImmutablePass {
// TypeInfos - List of C++ TypeInfo used in the current function.
//
std::vector<GlobalVariable *> TypeInfos;
std::vector<const GlobalVariable *> TypeInfos;
// FilterIds - List of typeids encoding filters used in the current function.
//
@ -138,7 +138,7 @@ class MachineModuleInfo : public ImmutablePass {
// Personalities - Vector of all personality functions ever seen. Used to emit
// common EH frames.
std::vector<Function *> Personalities;
std::vector<const Function *> Personalities;
/// UsedFunctions - The functions in the @llvm.used list in a more easily
/// searchable format. This does not include the functions in
@ -179,8 +179,8 @@ public:
const MCContext &getContext() const { return Context; }
MCContext &getContext() { return Context; }
void setModule(Module *M) { TheModule = M; }
Module *getModule() const { return TheModule; }
void setModule(const Module *M) { TheModule = M; }
const Module *getModule() const { return TheModule; }
/// getInfo - Keep track of various per-function pieces of information for
/// backends that would like to do so.
@ -199,7 +199,7 @@ public:
/// AnalyzeModule - Scan the module for global debug information.
///
void AnalyzeModule(Module &M);
void AnalyzeModule(const Module &M);
/// hasDebugInfo - Returns true if valid debug info is present.
///
@ -252,14 +252,15 @@ public:
/// addPersonality - Provide the personality function for the exception
/// information.
void addPersonality(MachineBasicBlock *LandingPad, Function *Personality);
void addPersonality(MachineBasicBlock *LandingPad,
const Function *Personality);
/// getPersonalityIndex - Get index of the current personality function inside
/// Personalitites array
unsigned getPersonalityIndex() const;
/// getPersonalities - Return array of personality functions ever seen.
const std::vector<Function *>& getPersonalities() const {
const std::vector<const Function *>& getPersonalities() const {
return Personalities;
}
@ -273,12 +274,12 @@ public:
/// addCatchTypeInfo - Provide the catch typeinfo for a landing pad.
///
void addCatchTypeInfo(MachineBasicBlock *LandingPad,
std::vector<GlobalVariable *> &TyInfo);
std::vector<const GlobalVariable *> &TyInfo);
/// addFilterTypeInfo - Provide the filter typeinfo for a landing pad.
///
void addFilterTypeInfo(MachineBasicBlock *LandingPad,
std::vector<GlobalVariable *> &TyInfo);
std::vector<const GlobalVariable *> &TyInfo);
/// addCleanup - Add a cleanup action for a landing pad.
///
@ -286,7 +287,7 @@ public:
/// getTypeIDFor - Return the type id for the specified typeinfo. This is
/// function wide.
unsigned getTypeIDFor(GlobalVariable *TI);
unsigned getTypeIDFor(const GlobalVariable *TI);
/// getFilterIDFor - Return the id of the filter encoded by TyIds. This is
/// function wide.
@ -323,7 +324,7 @@ public:
/// getTypeInfos - Return a reference to the C++ typeinfo for the current
/// function.
const std::vector<GlobalVariable *> &getTypeInfos() const {
const std::vector<const GlobalVariable *> &getTypeInfos() const {
return TypeInfos;
}
@ -335,7 +336,7 @@ public:
/// getPersonality - Return a personality function if available. The presence
/// of one is required to emit exception handling info.
Function *getPersonality() const;
const Function *getPersonality() const;
/// setVariableDbgInfo - Collect information used to emit debugging
/// information of a variable.

@ -117,8 +117,8 @@ private:
union {
int Index; // For MO_*Index - The index itself.
const char *SymbolName; // For MO_ExternalSymbol.
GlobalValue *GV; // For MO_GlobalAddress.
BlockAddress *BA; // For MO_BlockAddress.
const GlobalValue *GV; // For MO_GlobalAddress.
const BlockAddress *BA; // For MO_BlockAddress.
} Val;
int64_t Offset; // An offset from the object.
} OffsetedInfo;
@ -315,12 +315,12 @@ public:
return Contents.OffsetedInfo.Val.Index;
}
GlobalValue *getGlobal() const {
const GlobalValue *getGlobal() const {
assert(isGlobal() && "Wrong MachineOperand accessor");
return Contents.OffsetedInfo.Val.GV;
}
BlockAddress *getBlockAddress() const {
const BlockAddress *getBlockAddress() const {
assert(isBlockAddress() && "Wrong MachineOperand accessor");
return Contents.OffsetedInfo.Val.BA;
}
@ -457,7 +457,7 @@ public:
Op.setTargetFlags(TargetFlags);
return Op;
}
static MachineOperand CreateGA(GlobalValue *GV, int64_t Offset,
static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset,
unsigned char TargetFlags = 0) {
MachineOperand Op(MachineOperand::MO_GlobalAddress);
Op.Contents.OffsetedInfo.Val.GV = GV;
@ -473,7 +473,7 @@ public:
Op.setTargetFlags(TargetFlags);
return Op;
}
static MachineOperand CreateBA(BlockAddress *BA,
static MachineOperand CreateBA(const BlockAddress *BA,
unsigned char TargetFlags = 0) {
MachineOperand Op(MachineOperand::MO_BlockAddress);
Op.Contents.OffsetedInfo.Val.BA = BA;
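
A small sketch of what the const-qualified factory buys a caller (the helper is hypothetical): a global looked up from the IR can be attached to a MachineInstr without a const_cast.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
using namespace llvm;

// Hypothetical helper: reference GV from MI at the given offset.
static void addGlobalOperand(MachineInstr *MI, const GlobalValue *GV,
                             int64_t Offset) {
  MI->addOperand(MachineOperand::CreateGA(GV, Offset));
}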

@ -350,10 +350,10 @@ public:
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags = 0) {
return getJumpTable(JTI, VT, true, TargetFlags);
}
SDValue getConstantPool(Constant *C, EVT VT,
SDValue getConstantPool(const Constant *C, EVT VT,
unsigned Align = 0, int Offs = 0, bool isT=false,
unsigned char TargetFlags = 0);
SDValue getTargetConstantPool(Constant *C, EVT VT,
SDValue getTargetConstantPool(const Constant *C, EVT VT,
unsigned Align = 0, int Offset = 0,
unsigned char TargetFlags = 0) {
return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
@ -377,7 +377,7 @@ public:
SDValue getValueType(EVT);
SDValue getRegister(unsigned Reg, EVT VT);
SDValue getEHLabel(DebugLoc dl, SDValue Root, MCSymbol *Label);
SDValue getBlockAddress(BlockAddress *BA, EVT VT,
SDValue getBlockAddress(const BlockAddress *BA, EVT VT,
bool isTarget = false, unsigned char TargetFlags = 0);
SDValue getCopyToReg(SDValue Chain, DebugLoc dl, unsigned Reg, SDValue N) {
@ -767,7 +767,7 @@ public:
///
SDDbgValue *getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R, uint64_t Off,
DebugLoc DL, unsigned O);
SDDbgValue *getDbgValue(MDNode *MDPtr, Value *C, uint64_t Off,
SDDbgValue *getDbgValue(MDNode *MDPtr, const Value *C, uint64_t Off,
DebugLoc DL, unsigned O);
SDDbgValue *getDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
DebugLoc DL, unsigned O);

@ -279,22 +279,23 @@ private:
const SDValue *Ops, unsigned NumOps, unsigned EmitNodeInfo);
void PrepareEHLandingPad(MachineBasicBlock *BB);
void SelectAllBasicBlocks(Function &Fn);
void SelectAllBasicBlocks(const Function &Fn);
void FinishBasicBlock();
void SelectBasicBlock(BasicBlock *LLVMBB,
BasicBlock::iterator Begin,
BasicBlock::iterator End,
void SelectBasicBlock(const BasicBlock *LLVMBB,
BasicBlock::const_iterator Begin,
BasicBlock::const_iterator End,
bool &HadTailCall);
void CodeGenAndEmitDAG();
void LowerArguments(BasicBlock *BB);
void LowerArguments(const BasicBlock *BB);
void ShrinkDemandedOps();
void ComputeLiveOutVRegInfo();
void HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB);
void HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);
bool HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB, FastISel *F);
bool HandlePHINodesInSuccessorBlocksFast(const BasicBlock *LLVMBB,
FastISel *F);
/// Create the scheduler. If a specific scheduler was specified
/// via the SchedulerRegistry, use it, otherwise select the

@ -1140,7 +1140,7 @@ public:
};
class GlobalAddressSDNode : public SDNode {
GlobalValue *TheGlobal;
const GlobalValue *TheGlobal;
int64_t Offset;
unsigned char TargetFlags;
friend class SelectionDAG;
@ -1148,7 +1148,7 @@ class GlobalAddressSDNode : public SDNode {
int64_t o, unsigned char TargetFlags);
public:
GlobalValue *getGlobal() const { return TheGlobal; }
const GlobalValue *getGlobal() const { return TheGlobal; }
int64_t getOffset() const { return Offset; }
unsigned char getTargetFlags() const { return TargetFlags; }
// Return the address space this GlobalAddress belongs to.
@ -1203,15 +1203,15 @@ public:
class ConstantPoolSDNode : public SDNode {
union {
Constant *ConstVal;
const Constant *ConstVal;
MachineConstantPoolValue *MachineCPVal;
} Val;
int Offset; // It's a MachineConstantPoolValue if top bit is set.
unsigned Alignment; // Minimum alignment requirement of CP (not log2 value).
unsigned char TargetFlags;
friend class SelectionDAG;
ConstantPoolSDNode(bool isTarget, Constant *c, EVT VT, int o, unsigned Align,
unsigned char TF)
ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
unsigned Align, unsigned char TF)
: SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool,
DebugLoc(),
getSDVTList(VT)), Offset(o), Alignment(Align), TargetFlags(TF) {
@ -1234,7 +1234,7 @@ public:
return (int)Offset < 0;
}
Constant *getConstVal() const {
const Constant *getConstVal() const {
assert(!isMachineConstantPoolEntry() && "Wrong constantpool type");
return Val.ConstVal;
}
@ -1360,16 +1360,16 @@ public:
};
class BlockAddressSDNode : public SDNode {
BlockAddress *BA;
const BlockAddress *BA;
unsigned char TargetFlags;
friend class SelectionDAG;
BlockAddressSDNode(unsigned NodeTy, EVT VT, BlockAddress *ba,
BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
unsigned char Flags)
: SDNode(NodeTy, DebugLoc(), getSDVTList(VT)),
BA(ba), TargetFlags(Flags) {
}
public:
BlockAddress *getBlockAddress() const { return BA; }
const BlockAddress *getBlockAddress() const { return BA; }
unsigned char getTargetFlags() const { return TargetFlags; }
static bool classof(const BlockAddressSDNode *) { return true; }
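
Downstream of these node changes, code that only inspects the referenced IR object stays const. For instance (a sketch, not from the patch), querying the alignment of the global behind a GlobalAddressSDNode:

#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/GlobalValue.h"
using namespace llvm;

static unsigned globalNodeAlignment(const GlobalAddressSDNode *GA) {
  // getGlobal() now returns 'const GlobalValue *'; reading the alignment
  // needs no mutable access.
  const GlobalValue *GV = GA->getGlobal();
  return GV->getAlignment();
}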

@ -317,7 +317,7 @@ public:
};
virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info,
CallInst &I, unsigned Intrinsic) {
const CallInst &I, unsigned Intrinsic) {
return false;
}
@ -864,7 +864,7 @@ public:
/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
virtual bool
isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) const;
isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
/// PerformDAGCombine - This method will be invoked for all target nodes and
/// for any target-independent nodes that the target has registered with

@ -425,7 +425,7 @@ bool DwarfException::CallToNoUnwindFunction(const MachineInstr *MI) {
if (!MO.isGlobal()) continue;
Function *F = dyn_cast<Function>(MO.getGlobal());
const Function *F = dyn_cast<Function>(MO.getGlobal());
if (F == 0) continue;
if (SawFunc) {
@ -579,7 +579,7 @@ ComputeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
/// 3. Type ID table contains references to all the C++ typeinfo for all
/// catches in the function. This tables is reverse indexed base 1.
void DwarfException::EmitExceptionTable() {
const std::vector<GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
const std::vector<const GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
@ -861,7 +861,7 @@ void DwarfException::EmitExceptionTable() {
Asm->OutStreamer.AddComment("-- Catch TypeInfos --");
Asm->OutStreamer.AddBlankLine();
}
for (std::vector<GlobalVariable *>::const_reverse_iterator
for (std::vector<const GlobalVariable *>::const_reverse_iterator
I = TypeInfos.rbegin(), E = TypeInfos.rend(); I != E; ++I) {
const GlobalVariable *GV = *I;
@ -896,7 +896,7 @@ void DwarfException::EndModule() {
if (!shouldEmitMovesModule && !shouldEmitTableModule)
return;
const std::vector<Function *> Personalities = MMI->getPersonalities();
const std::vector<const Function *> Personalities = MMI->getPersonalities();
for (unsigned I = 0, E = Personalities.size(); I < E; ++I)
EmitCIE(Personalities[I], I);

@ -331,7 +331,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
IRBuilder<> Builder(CI->getParent(), CI);
LLVMContext &Context = CI->getContext();
Function *Callee = CI->getCalledFunction();
const Function *Callee = CI->getCalledFunction();
assert(Callee && "Cannot lower an indirect call!");
switch (Callee->getIntrinsicID()) {

@ -630,7 +630,7 @@ MachineConstantPool::~MachineConstantPool() {
/// CanShareConstantPoolEntry - Test whether the given two constants
/// can be allocated the same constant pool entry.
static bool CanShareConstantPoolEntry(Constant *A, Constant *B,
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
const TargetData *TD) {
// Handle the trivial case quickly.
if (A == B) return true;
@ -645,17 +645,17 @@ static bool CanShareConstantPoolEntry(Constant *A, Constant *B,
// If a floating-point value and an integer value have the same encoding,
// they can share a constant-pool entry.
if (ConstantFP *AFP = dyn_cast<ConstantFP>(A))
if (ConstantInt *BI = dyn_cast<ConstantInt>(B))
if (const ConstantFP *AFP = dyn_cast<ConstantFP>(A))
if (const ConstantInt *BI = dyn_cast<ConstantInt>(B))
return AFP->getValueAPF().bitcastToAPInt() == BI->getValue();
if (ConstantFP *BFP = dyn_cast<ConstantFP>(B))
if (ConstantInt *AI = dyn_cast<ConstantInt>(A))
if (const ConstantFP *BFP = dyn_cast<ConstantFP>(B))
if (const ConstantInt *AI = dyn_cast<ConstantInt>(A))
return BFP->getValueAPF().bitcastToAPInt() == AI->getValue();
// Two vectors can share an entry if each pair of corresponding
// elements could.
if (ConstantVector *AV = dyn_cast<ConstantVector>(A))
if (ConstantVector *BV = dyn_cast<ConstantVector>(B)) {
if (const ConstantVector *AV = dyn_cast<ConstantVector>(A))
if (const ConstantVector *BV = dyn_cast<ConstantVector>(B)) {
if (AV->getType()->getNumElements() != BV->getType()->getNumElements())
return false;
for (unsigned i = 0, e = AV->getType()->getNumElements(); i != e; ++i)
@ -674,7 +674,7 @@ static bool CanShareConstantPoolEntry(Constant *A, Constant *B,
/// an existing one. User must specify the log2 of the minimum required
/// alignment for the object.
///
unsigned MachineConstantPool::getConstantPoolIndex(Constant *C,
unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
unsigned Alignment) {
assert(Alignment && "Alignment must be specified!");
if (Alignment > PoolAlignment) PoolAlignment = Alignment;
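
A worked example of the sharing rule above (CanShareConstantPoolEntry is file-static, so this is illustrative only): the f32 constant 1.0 and the i32 constant 0x3F800000 have the same 32-bit encoding, so their entries may be folded into a single constant-pool slot.

#include "llvm/Constants.h"
#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
using namespace llvm;

void constantPoolSharingExample(LLVMContext &Ctx) {
  // bitcastToAPInt() of 1.0f is APInt(32, 0x3F800000), which equals the
  // integer constant's value, so the two can share one constant-pool entry.
  const Constant *FP  = ConstantFP::get(Type::getFloatTy(Ctx), 1.0);
  const Constant *Int = ConstantInt::get(Type::getInt32Ty(Ctx), 0x3F800000u);
  (void)FP;
  (void)Int;
}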

@ -315,18 +315,18 @@ void MachineModuleInfo::EndFunction() {
/// AnalyzeModule - Scan the module for global debug information.
///
void MachineModuleInfo::AnalyzeModule(Module &M) {
void MachineModuleInfo::AnalyzeModule(const Module &M) {
// Insert functions in the llvm.used array (but not llvm.compiler.used) into
// UsedFunctions.
GlobalVariable *GV = M.getGlobalVariable("llvm.used");
const GlobalVariable *GV = M.getGlobalVariable("llvm.used");
if (!GV || !GV->hasInitializer()) return;
// Should be an array of 'i8*'.
ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
const ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
if (InitList == 0) return;
for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
if (Function *F =
if (const Function *F =
dyn_cast<Function>(InitList->getOperand(i)->stripPointerCasts()))
UsedFunctions.insert(F);
}
@ -407,7 +407,7 @@ MCSymbol *MachineModuleInfo::addLandingPad(MachineBasicBlock *LandingPad) {
/// addPersonality - Provide the personality function for the exception
/// information.
void MachineModuleInfo::addPersonality(MachineBasicBlock *LandingPad,
Function *Personality) {
const Function *Personality) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
LP.Personality = Personality;
@ -426,7 +426,7 @@ void MachineModuleInfo::addPersonality(MachineBasicBlock *LandingPad,
/// addCatchTypeInfo - Provide the catch typeinfo for a landing pad.
///
void MachineModuleInfo::addCatchTypeInfo(MachineBasicBlock *LandingPad,
std::vector<GlobalVariable *> &TyInfo) {
std::vector<const GlobalVariable *> &TyInfo) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
for (unsigned N = TyInfo.size(); N; --N)
LP.TypeIds.push_back(getTypeIDFor(TyInfo[N - 1]));
@ -435,7 +435,7 @@ void MachineModuleInfo::addCatchTypeInfo(MachineBasicBlock *LandingPad,
/// addFilterTypeInfo - Provide the filter typeinfo for a landing pad.
///
void MachineModuleInfo::addFilterTypeInfo(MachineBasicBlock *LandingPad,
std::vector<GlobalVariable *> &TyInfo) {
std::vector<const GlobalVariable *> &TyInfo) {
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
std::vector<unsigned> IdsInFilter(TyInfo.size());
for (unsigned I = 0, E = TyInfo.size(); I != E; ++I)
@ -492,7 +492,7 @@ void MachineModuleInfo::TidyLandingPads() {
/// getTypeIDFor - Return the type id for the specified typeinfo. This is
/// function wide.
unsigned MachineModuleInfo::getTypeIDFor(GlobalVariable *TI) {
unsigned MachineModuleInfo::getTypeIDFor(const GlobalVariable *TI) {
for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
if (TypeInfos[i] == TI) return i + 1;
@ -532,7 +532,7 @@ try_next:;
}
/// getPersonality - Return the personality function for the current function.
Function *MachineModuleInfo::getPersonality() const {
const Function *MachineModuleInfo::getPersonality() const {
// FIXME: Until PR1414 will be fixed, we're using 1 personality function per
// function
return !LandingPads.empty() ? LandingPads[0].Personality : NULL;
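
As a consequence, clients of MachineModuleInfo only ever see const personality functions. A sketch of a read-only query (both the helper and the predicate it checks are made up for illustration):

#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/Function.h"
using namespace llvm;

// Hypothetical helper: true if the current function uses the setjmp/longjmp
// C++ personality routine.
static bool usesSjLjPersonality(const MachineModuleInfo &MMI) {
  const Function *Personality = MMI.getPersonality();
  return Personality && Personality->getName() == "__gxx_personality_sj0";
}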

@ -6288,7 +6288,7 @@ SDValue DAGCombiner::BuildUDIV(SDNode *N) {
/// FindBaseOffset - Return true if base is a frame index, which is known not
// to alias with anything but itself. Provides base object and offset as results.
static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
GlobalValue *&GV, void *&CV) {
const GlobalValue *&GV, void *&CV) {
// Assume it is a primitive operation.
Base = Ptr; Offset = 0; GV = 0; CV = 0;
@ -6336,7 +6336,7 @@ bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
// Gather base node and offset information.
SDValue Base1, Base2;
int64_t Offset1, Offset2;
GlobalValue *GV1, *GV2;
const GlobalValue *GV1, *GV2;
void *CV1, *CV2;
bool isFrameIndex1 = FindBaseOffset(Ptr1, Base1, Offset1, GV1, CV1);
bool isFrameIndex2 = FindBaseOffset(Ptr2, Base2, Offset2, GV2, CV2);

@ -55,7 +55,7 @@
#include "FunctionLoweringInfo.h"
using namespace llvm;
unsigned FastISel::getRegForValue(Value *V) {
unsigned FastISel::getRegForValue(const Value *V) {
EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
// Don't handle non-simple values in FastISel.
if (!RealVT.isSimple())
@ -83,7 +83,7 @@ unsigned FastISel::getRegForValue(Value *V) {
if (Reg != 0)
return Reg;
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
if (CI->getValue().getActiveBits() <= 64)
Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
} else if (isa<AllocaInst>(V)) {
@ -93,7 +93,7 @@ unsigned FastISel::getRegForValue(Value *V) {
// local-CSE'd with actual integer zeros.
Reg =
getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
} else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
} else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
// Try to emit the constant directly.
Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
@ -116,7 +116,7 @@ unsigned FastISel::getRegForValue(Value *V) {
Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
}
}
} else if (Operator *Op = dyn_cast<Operator>(V)) {
} else if (const Operator *Op = dyn_cast<Operator>(V)) {
if (!SelectOperator(Op, Op->getOpcode())) return 0;
Reg = LocalValueMap[Op];
} else if (isa<UndefValue>(V)) {
@ -136,7 +136,7 @@ unsigned FastISel::getRegForValue(Value *V) {
return Reg;
}
unsigned FastISel::lookUpRegForValue(Value *V) {
unsigned FastISel::lookUpRegForValue(const Value *V) {
// Look up the value to see if we already have a register for it. We
// cache values defined by Instructions across blocks, and other values
// only locally. This is because Instructions already have the SSA
@ -152,7 +152,7 @@ unsigned FastISel::lookUpRegForValue(Value *V) {
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
unsigned FastISel::UpdateValueMap(Value* I, unsigned Reg) {
unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
if (!isa<Instruction>(I)) {
LocalValueMap[I] = Reg;
return Reg;
@ -169,7 +169,7 @@ unsigned FastISel::UpdateValueMap(Value* I, unsigned Reg) {
return AssignedReg;
}
unsigned FastISel::getRegForGEPIndex(Value *Idx) {
unsigned FastISel::getRegForGEPIndex(const Value *Idx) {
unsigned IdxN = getRegForValue(Idx);
if (IdxN == 0)
// Unhandled operand. Halt "fast" selection and bail.
@ -188,7 +188,7 @@ unsigned FastISel::getRegForGEPIndex(Value *Idx) {
/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(User *I, unsigned ISDOpcode) {
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
if (VT == MVT::Other || !VT.isSimple())
// Unhandled type. Halt "fast" selection and bail.
@ -254,7 +254,7 @@ bool FastISel::SelectBinaryOp(User *I, unsigned ISDOpcode) {
return true;
}
bool FastISel::SelectGetElementPtr(User *I) {
bool FastISel::SelectGetElementPtr(const User *I) {
unsigned N = getRegForValue(I->getOperand(0));
if (N == 0)
// Unhandled operand. Halt "fast" selection and bail.
@ -262,9 +262,9 @@ bool FastISel::SelectGetElementPtr(User *I) {
const Type *Ty = I->getOperand(0)->getType();
MVT VT = TLI.getPointerTy();
for (GetElementPtrInst::op_iterator OI = I->op_begin()+1, E = I->op_end();
OI != E; ++OI) {
Value *Idx = *OI;
for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
E = I->op_end(); OI != E; ++OI) {
const Value *Idx = *OI;
if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
@ -282,7 +282,7 @@ bool FastISel::SelectGetElementPtr(User *I) {
Ty = cast<SequentialType>(Ty)->getElementType();
// If this is a constant subscript, handle it quickly.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->getZExtValue() == 0) continue;
uint64_t Offs =
TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
@ -318,8 +318,8 @@ bool FastISel::SelectGetElementPtr(User *I) {
return true;
}
bool FastISel::SelectCall(User *I) {
Function *F = cast<CallInst>(I)->getCalledFunction();
bool FastISel::SelectCall(const User *I) {
const Function *F = cast<CallInst>(I)->getCalledFunction();
if (!F) return false;
// Handle selected intrinsic function calls.
@ -327,17 +327,17 @@ bool FastISel::SelectCall(User *I) {
switch (IID) {
default: break;
case Intrinsic::dbg_declare: {
DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
const DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
if (!DIDescriptor::ValidDebugInfo(DI->getVariable(), CodeGenOpt::None) ||
!MF.getMMI().hasDebugInfo())
return true;
Value *Address = DI->getAddress();
const Value *Address = DI->getAddress();
if (!Address)
return true;
if (isa<UndefValue>(Address))
return true;
AllocaInst *AI = dyn_cast<AllocaInst>(Address);
const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
// Don't handle byval struct arguments or VLAs, for example.
if (!AI) break;
DenseMap<const AllocaInst*, int>::iterator SI =
@ -354,18 +354,18 @@ bool FastISel::SelectCall(User *I) {
}
case Intrinsic::dbg_value: {
// This form of DBG_VALUE is target-independent.
DbgValueInst *DI = cast<DbgValueInst>(I);
const DbgValueInst *DI = cast<DbgValueInst>(I);
const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
Value *V = DI->getValue();
const Value *V = DI->getValue();
if (!V) {
// Currently the optimizer can produce this; insert an undef to
// help debugging. Probably the optimizer should not do this.
BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
addMetadata(DI->getVariable());
} else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
} else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
BuildMI(MBB, DL, II).addImm(CI->getZExtValue()).addImm(DI->getOffset()).
addMetadata(DI->getVariable());
} else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
} else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
BuildMI(MBB, DL, II).addFPImm(CF).addImm(DI->getOffset()).
addMetadata(DI->getVariable());
} else if (unsigned Reg = lookUpRegForValue(V)) {
@ -448,7 +448,7 @@ bool FastISel::SelectCall(User *I) {
return false;
}
bool FastISel::SelectCast(User *I, unsigned Opcode) {
bool FastISel::SelectCast(const User *I, unsigned Opcode) {
EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
EVT DstVT = TLI.getValueType(I->getType());
@ -500,7 +500,7 @@ bool FastISel::SelectCast(User *I, unsigned Opcode) {
return true;
}
bool FastISel::SelectBitCast(User *I) {
bool FastISel::SelectBitCast(const User *I) {
// If the bitcast doesn't change the type, just use the operand value.
if (I->getType() == I->getOperand(0)->getType()) {
unsigned Reg = getRegForValue(I->getOperand(0));
@ -551,7 +551,7 @@ bool FastISel::SelectBitCast(User *I) {
}
bool
FastISel::SelectInstruction(Instruction *I) {
FastISel::SelectInstruction(const Instruction *I) {
// First, try doing target-independent selection.
if (SelectOperator(I, I->getOpcode()))
return true;
@ -580,7 +580,7 @@ FastISel::FastEmitBranch(MachineBasicBlock *MSucc) {
/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(User *I) {
FastISel::SelectFNeg(const User *I) {
unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
if (OpReg == 0) return false;
@ -621,7 +621,7 @@ FastISel::SelectFNeg(User *I) {
}
bool
FastISel::SelectOperator(User *I, unsigned Opcode) {
FastISel::SelectOperator(const User *I, unsigned Opcode) {
switch (Opcode) {
case Instruction::Add:
return SelectBinaryOp(I, ISD::ADD);
@ -667,10 +667,10 @@ FastISel::SelectOperator(User *I, unsigned Opcode) {
return SelectGetElementPtr(I);
case Instruction::Br: {
BranchInst *BI = cast<BranchInst>(I);
const BranchInst *BI = cast<BranchInst>(I);
if (BI->isUnconditional()) {
BasicBlock *LLVMSucc = BI->getSuccessor(0);
const BasicBlock *LLVMSucc = BI->getSuccessor(0);
MachineBasicBlock *MSucc = MBBMap[LLVMSucc];
FastEmitBranch(MSucc);
return true;
@ -782,7 +782,7 @@ unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
}
unsigned FastISel::FastEmit_f(MVT, MVT,
unsigned, ConstantFP * /*FPImm*/) {
unsigned, const ConstantFP * /*FPImm*/) {
return 0;
}
@ -794,7 +794,7 @@ unsigned FastISel::FastEmit_ri(MVT, MVT,
unsigned FastISel::FastEmit_rf(MVT, MVT,
unsigned, unsigned /*Op0*/,
ConstantFP * /*FPImm*/) {
const ConstantFP * /*FPImm*/) {
return 0;
}
@ -827,7 +827,7 @@ unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and try FastEmit_rr instead.
unsigned FastISel::FastEmit_rf_(MVT VT, unsigned Opcode,
unsigned Op0, ConstantFP *FPImm,
unsigned Op0, const ConstantFP *FPImm,
MVT ImmType) {
// First check if immediate type is legal. If not, we can't use the rf form.
unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm);
@ -937,7 +937,7 @@ unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, ConstantFP *FPImm) {
unsigned Op0, const ConstantFP *FPImm) {
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);

@ -316,7 +316,7 @@ void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,
// Gather all the type infos for this landing pad and pass them along to
// MachineModuleInfo.
std::vector<GlobalVariable *> TyInfo;
std::vector<const GlobalVariable *> TyInfo;
unsigned N = I.getNumOperands();
for (unsigned i = N - 1; i > 2; --i) {

@ -531,10 +531,10 @@ MachineInstr *InstrEmitter::EmitDbgValue(SDDbgValue *SD,
AddOperand(&*MIB, Op, (*MIB).getNumOperands(), &II, VRBaseMap,
true /*IsDebug*/);
} else if (SD->getKind() == SDDbgValue::CONST) {
Value *V = SD->getConst();
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
const Value *V = SD->getConst();
if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
MIB.addImm(CI->getSExtValue());
} else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
} else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
MIB.addFPImm(CF);
} else {
// Could be an Undef. In any case insert an Undef so we can see what we

View File

@ -41,7 +41,7 @@ private:
SDNode *Node; // valid for expressions
unsigned ResNo; // valid for expressions
} s;
Value *Const; // valid for constants
const Value *Const; // valid for constants
unsigned FrameIx; // valid for stack objects
} u;
MDNode *mdPtr;
@ -60,7 +60,8 @@ public:
}
// Constructor for constants.
SDDbgValue(MDNode *mdP, Value *C, uint64_t off, DebugLoc dl, unsigned O) :
SDDbgValue(MDNode *mdP, const Value *C, uint64_t off, DebugLoc dl,
unsigned O) :
mdPtr(mdP), Offset(off), DL(dl), Order(O), Invalid(false) {
kind = CONST;
u.Const = C;
@ -86,7 +87,7 @@ public:
unsigned getResNo() { assert (kind==SDNODE); return u.s.ResNo; }
// Returns the Value* for a constant
Value *getConst() { assert (kind==CONST); return u.Const; }
const Value *getConst() { assert (kind==CONST); return u.Const; }
// Returns the FrameIx for a stack object
unsigned getFrameIx() { assert (kind==FRAMEIX); return u.FrameIx; }

@ -1048,7 +1048,7 @@ SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
return SDValue(N, 0);
}
SDValue SelectionDAG::getConstantPool(Constant *C, EVT VT,
SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
unsigned Alignment, int Offset,
bool isTarget,
unsigned char TargetFlags) {
@ -1319,7 +1319,7 @@ SDValue SelectionDAG::getEHLabel(DebugLoc dl, SDValue Root, MCSymbol *Label) {
}
SDValue SelectionDAG::getBlockAddress(BlockAddress *BA, EVT VT,
SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
bool isTarget,
unsigned char TargetFlags) {
unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
@ -2270,7 +2270,7 @@ bool SelectionDAG::isVerifiedDebugInfoDesc(SDValue Op) const {
GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
if (!GA) return false;
if (GA->getOffset() != 0) return false;
GlobalVariable *GV = dyn_cast<GlobalVariable>(GA->getGlobal());
const GlobalVariable *GV = dyn_cast<GlobalVariable>(GA->getGlobal());
if (!GV) return false;
return MF->getMMI().hasDebugInfo();
}
@ -3195,7 +3195,7 @@ static bool isMemSrcFromString(SDValue Src, std::string &Str) {
if (!G)
return false;
GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
const GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
if (GV && GetConstantStringInfo(GV, Str, SrcDelta, false))
return true;
@ -4935,7 +4935,7 @@ SelectionDAG::getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R, uint64_t Off,
}
SDDbgValue *
SelectionDAG::getDbgValue(MDNode *MDPtr, Value *C, uint64_t Off,
SelectionDAG::getDbgValue(MDNode *MDPtr, const Value *C, uint64_t Off,
DebugLoc DL, unsigned O) {
return new (Allocator) SDDbgValue(MDPtr, C, Off, DL, O);
}
@ -6169,8 +6169,8 @@ bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
return true;
}
GlobalValue *GV1 = NULL;
GlobalValue *GV2 = NULL;
const GlobalValue *GV1 = NULL;
const GlobalValue *GV2 = NULL;
int64_t Offset1 = 0;
int64_t Offset2 = 0;
bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
@ -6185,14 +6185,14 @@ bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
/// it cannot be inferred.
unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
// If this is a GlobalAddress + cst, return the alignment.
GlobalValue *GV;
const GlobalValue *GV;
int64_t GVOffset = 0;
if (TLI.isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
// If GV has specified alignment, then use it. Otherwise, use the preferred
// alignment.
unsigned Align = GV->getAlignment();
if (!Align) {
if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV)) {
if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV)) {
if (GVar->hasInitializer()) {
const TargetData *TD = TLI.getTargetData();
Align = TD->getPreferredAlignment(GVar);

@ -613,11 +613,11 @@ void SelectionDAGBuilder::AssignOrderingToNode(const SDNode *Node) {
AssignOrderingToNode(Node->getOperand(I).getNode());
}
void SelectionDAGBuilder::visit(Instruction &I) {
void SelectionDAGBuilder::visit(const Instruction &I) {
visit(I.getOpcode(), I);
}
void SelectionDAGBuilder::visit(unsigned Opcode, User &I) {
void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
// Note: this doesn't use InstVisitor, because it has to work with
// ConstantExpr's in addition to instructions.
switch (Opcode) {
@ -812,7 +812,7 @@ static void getReturnInfo(const Type* ReturnType,
}
}
void SelectionDAGBuilder::visitRet(ReturnInst &I) {
void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
SDValue Chain = getControlRoot();
SmallVector<ISD::OutputArg, 8> Outs;
FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
@ -917,7 +917,7 @@ void SelectionDAGBuilder::visitRet(ReturnInst &I) {
/// CopyToExportRegsIfNeeded - If the given value has virtual registers
/// created for it, emit nodes to copy the value into the virtual
/// registers.
void SelectionDAGBuilder::CopyToExportRegsIfNeeded(Value *V) {
void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
if (!V->use_empty()) {
DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
if (VMI != FuncInfo.ValueMap.end())
@ -928,7 +928,7 @@ void SelectionDAGBuilder::CopyToExportRegsIfNeeded(Value *V) {
/// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
/// CopyTo/FromReg.
void SelectionDAGBuilder::ExportFromCurrentBlock(Value *V) {
void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
// No need to export constants.
if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
@ -939,11 +939,11 @@ void SelectionDAGBuilder::ExportFromCurrentBlock(Value *V) {
CopyValueToVirtualRegister(V, Reg);
}
bool SelectionDAGBuilder::isExportableFromCurrentBlock(Value *V,
bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
const BasicBlock *FromBB) {
// The operands of the setcc have to be in this block. We don't know
// how to export them from some other block.
if (Instruction *VI = dyn_cast<Instruction>(V)) {
if (const Instruction *VI = dyn_cast<Instruction>(V)) {
// Can export from current BB.
if (VI->getParent() == FromBB)
return true;
@ -977,7 +977,7 @@ static bool InBlock(const Value *V, const BasicBlock *BB) {
/// AND operator tree.
///
void
SelectionDAGBuilder::EmitBranchForMergedCondition(Value *Cond,
SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
MachineBasicBlock *CurBB) {
@ -985,7 +985,7 @@ SelectionDAGBuilder::EmitBranchForMergedCondition(Value *Cond,
// If the leaf of the tree is a comparison, merge the condition into
// the caseblock.
if (CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
// The operands of the cmp have to be in this block. We don't know
// how to export them from some other block. If this is the first block
// of the sequence, no exporting is needed.
@ -993,9 +993,9 @@ SelectionDAGBuilder::EmitBranchForMergedCondition(Value *Cond,
(isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
ISD::CondCode Condition;
if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
Condition = getICmpCondCode(IC->getPredicate());
} else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
} else if (const FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
Condition = getFCmpCondCode(FC->getPredicate());
} else {
Condition = ISD::SETEQ; // silence warning.
@ -1016,13 +1016,13 @@ SelectionDAGBuilder::EmitBranchForMergedCondition(Value *Cond,
}
/// FindMergedConditions - If Cond is an expression like
void SelectionDAGBuilder::FindMergedConditions(Value *Cond,
void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
MachineBasicBlock *CurBB,
unsigned Opc) {
// If this node is not part of the or/and tree, emit it as a branch.
Instruction *BOp = dyn_cast<Instruction>(Cond);
const Instruction *BOp = dyn_cast<Instruction>(Cond);
if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
(unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
BOp->getParent() != CurBB->getBasicBlock() ||
@ -1102,7 +1102,7 @@ SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
return true;
}
void SelectionDAGBuilder::visitBr(BranchInst &I) {
void SelectionDAGBuilder::visitBr(const BranchInst &I) {
// Update machine-CFG edges.
MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
@ -1127,7 +1127,7 @@ void SelectionDAGBuilder::visitBr(BranchInst &I) {
// If this condition is one of the special cases we handle, do special stuff
// now.
Value *CondVal = I.getCondition();
const Value *CondVal = I.getCondition();
MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
// If this is a series of conditions that are or'd or and'd together, emit
@ -1145,7 +1145,7 @@ void SelectionDAGBuilder::visitBr(BranchInst &I) {
// cmp D, E
// jle foo
//
if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
if (BOp->hasOneUse() &&
(BOp->getOpcode() == Instruction::And ||
BOp->getOpcode() == Instruction::Or)) {
@ -1417,7 +1417,7 @@ void SelectionDAGBuilder::visitBitTestCase(MachineBasicBlock* NextMBB,
DAG.setRoot(BrAnd);
}
void SelectionDAGBuilder::visitInvoke(InvokeInst &I) {
void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
// Retrieve successors.
MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
@ -1442,14 +1442,14 @@ void SelectionDAGBuilder::visitInvoke(InvokeInst &I) {
DAG.getBasicBlock(Return)));
}
void SelectionDAGBuilder::visitUnwind(UnwindInst &I) {
void SelectionDAGBuilder::visitUnwind(const UnwindInst &I) {
}
/// handleSmallSwitchCaseRange - Emit a series of specific tests (suitable for
/// small case ranges).
bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
CaseRecVector& WorkList,
Value* SV,
const Value* SV,
MachineBasicBlock* Default) {
Case& BackCase = *(CR.Range.second-1);
@ -1503,7 +1503,7 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
FallThrough = Default;
}
Value *RHS, *LHS, *MHS;
const Value *RHS, *LHS, *MHS;
ISD::CondCode CC;
if (I->High == I->Low) {
// This is just small small case range :) containing exactly 1 case
@ -1546,7 +1546,7 @@ static APInt ComputeRange(const APInt &First, const APInt &Last) {
/// handleJTSwitchCase - Emit jumptable for current switch case range
bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec& CR,
CaseRecVector& WorkList,
Value* SV,
const Value* SV,
MachineBasicBlock* Default) {
Case& FrontCase = *CR.Range.first;
Case& BackCase = *(CR.Range.second-1);
@ -1641,7 +1641,7 @@ bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec& CR,
/// 2 subtrees.
bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
CaseRecVector& WorkList,
Value* SV,
const Value* SV,
MachineBasicBlock* Default) {
// Get the MachineFunction which holds the current MBB. This is used when
// inserting any additional MBBs necessary to represent the switch.
@ -1769,7 +1769,7 @@ bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
/// of masks and emit bit tests with these masks.
bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
CaseRecVector& WorkList,
Value* SV,
const Value* SV,
MachineBasicBlock* Default){
EVT PTy = TLI.getPointerTy();
unsigned IntPtrBits = PTy.getSizeInBits();
@ -1939,7 +1939,7 @@ size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
return numCmps;
}
void SelectionDAGBuilder::visitSwitch(SwitchInst &SI) {
void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
// Figure out which block is immediately after the current one.
MachineBasicBlock *NextBlock = 0;
MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
@ -1971,7 +1971,7 @@ void SelectionDAGBuilder::visitSwitch(SwitchInst &SI) {
// Get the Value to be switched on and default basic blocks, which will be
// inserted into CaseBlock records, representing basic blocks in the binary
// search tree.
Value *SV = SI.getOperand(0);
const Value *SV = SI.getOperand(0);
// Push the initial CaseRec onto the worklist
CaseRecVector WorkList;
@ -2002,7 +2002,7 @@ void SelectionDAGBuilder::visitSwitch(SwitchInst &SI) {
}
}
void SelectionDAGBuilder::visitIndirectBr(IndirectBrInst &I) {
void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
// Update machine-CFG edges with unique successors.
SmallVector<BasicBlock*, 32> succs;
succs.reserve(I.getNumSuccessors());
@ -2018,7 +2018,7 @@ void SelectionDAGBuilder::visitIndirectBr(IndirectBrInst &I) {
getValue(I.getAddress())));
}
void SelectionDAGBuilder::visitFSub(User &I) {
void SelectionDAGBuilder::visitFSub(const User &I) {
// -0.0 - X --> fneg
const Type *Ty = I.getType();
if (Ty->isVectorTy()) {
@ -2048,14 +2048,14 @@ void SelectionDAGBuilder::visitFSub(User &I) {
visitBinary(I, ISD::FSUB);
}
void SelectionDAGBuilder::visitBinary(User &I, unsigned OpCode) {
void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
setValue(&I, DAG.getNode(OpCode, getCurDebugLoc(),
Op1.getValueType(), Op1, Op2));
}
void SelectionDAGBuilder::visitShift(User &I, unsigned Opcode) {
void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
if (!I.getType()->isVectorTy() &&
@ -2089,11 +2089,11 @@ void SelectionDAGBuilder::visitShift(User &I, unsigned Opcode) {
Op1.getValueType(), Op1, Op2));
}
void SelectionDAGBuilder::visitICmp(User &I) {
void SelectionDAGBuilder::visitICmp(const User &I) {
ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
predicate = IC->getPredicate();
else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
predicate = ICmpInst::Predicate(IC->getPredicate());
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
@ -2103,11 +2103,11 @@ void SelectionDAGBuilder::visitICmp(User &I) {
setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Opcode));
}
void SelectionDAGBuilder::visitFCmp(User &I) {
void SelectionDAGBuilder::visitFCmp(const User &I) {
FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
predicate = FC->getPredicate();
else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
predicate = FCmpInst::Predicate(FC->getPredicate());
SDValue Op1 = getValue(I.getOperand(0));
SDValue Op2 = getValue(I.getOperand(1));
@ -2116,7 +2116,7 @@ void SelectionDAGBuilder::visitFCmp(User &I) {
setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition));
}
void SelectionDAGBuilder::visitSelect(User &I) {
void SelectionDAGBuilder::visitSelect(const User &I) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, I.getType(), ValueVTs);
unsigned NumValues = ValueVTs.size();
@ -2141,14 +2141,14 @@ void SelectionDAGBuilder::visitSelect(User &I) {
&Values[0], NumValues));
}
void SelectionDAGBuilder::visitTrunc(User &I) {
void SelectionDAGBuilder::visitTrunc(const User &I) {
// TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
SDValue N = getValue(I.getOperand(0));
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitZExt(User &I) {
void SelectionDAGBuilder::visitZExt(const User &I) {
// ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
// ZExt also can't be a cast to bool for same reason. So, nothing much to do
SDValue N = getValue(I.getOperand(0));
@ -2156,7 +2156,7 @@ void SelectionDAGBuilder::visitZExt(User &I) {
setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitSExt(User &I) {
void SelectionDAGBuilder::visitSExt(const User &I) {
// SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
// SExt also can't be a cast to bool for same reason. So, nothing much to do
SDValue N = getValue(I.getOperand(0));
@ -2164,7 +2164,7 @@ void SelectionDAGBuilder::visitSExt(User &I) {
setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitFPTrunc(User &I) {
void SelectionDAGBuilder::visitFPTrunc(const User &I) {
// FPTrunc is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
EVT DestVT = TLI.getValueType(I.getType());
@ -2172,42 +2172,42 @@ void SelectionDAGBuilder::visitFPTrunc(User &I) {
DestVT, N, DAG.getIntPtrConstant(0)));
}
void SelectionDAGBuilder::visitFPExt(User &I){
void SelectionDAGBuilder::visitFPExt(const User &I){
// FPTrunc is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitFPToUI(User &I) {
void SelectionDAGBuilder::visitFPToUI(const User &I) {
// FPToUI is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitFPToSI(User &I) {
void SelectionDAGBuilder::visitFPToSI(const User &I) {
// FPToSI is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitUIToFP(User &I) {
void SelectionDAGBuilder::visitUIToFP(const User &I) {
// UIToFP is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitSIToFP(User &I){
void SelectionDAGBuilder::visitSIToFP(const User &I){
// SIToFP is never a no-op cast, no need to check
SDValue N = getValue(I.getOperand(0));
EVT DestVT = TLI.getValueType(I.getType());
setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N));
}
void SelectionDAGBuilder::visitPtrToInt(User &I) {
void SelectionDAGBuilder::visitPtrToInt(const User &I) {
// What to do depends on the size of the integer and the size of the pointer.
// We can either truncate, zero extend, or no-op, accordingly.
SDValue N = getValue(I.getOperand(0));
@ -2216,7 +2216,7 @@ void SelectionDAGBuilder::visitPtrToInt(User &I) {
setValue(&I, DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT));
}
void SelectionDAGBuilder::visitIntToPtr(User &I) {
void SelectionDAGBuilder::visitIntToPtr(const User &I) {
// What to do depends on the size of the integer and the size of the pointer.
// We can either truncate, zero extend, or no-op, accordingly.
SDValue N = getValue(I.getOperand(0));
@ -2225,7 +2225,7 @@ void SelectionDAGBuilder::visitIntToPtr(User &I) {
setValue(&I, DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT));
}
void SelectionDAGBuilder::visitBitCast(User &I) {
void SelectionDAGBuilder::visitBitCast(const User &I) {
SDValue N = getValue(I.getOperand(0));
EVT DestVT = TLI.getValueType(I.getType());
@ -2238,7 +2238,7 @@ void SelectionDAGBuilder::visitBitCast(User &I) {
setValue(&I, N); // noop cast.
}
void SelectionDAGBuilder::visitInsertElement(User &I) {
void SelectionDAGBuilder::visitInsertElement(const User &I) {
SDValue InVec = getValue(I.getOperand(0));
SDValue InVal = getValue(I.getOperand(1));
SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
@ -2249,7 +2249,7 @@ void SelectionDAGBuilder::visitInsertElement(User &I) {
InVec, InVal, InIdx));
}
void SelectionDAGBuilder::visitExtractElement(User &I) {
void SelectionDAGBuilder::visitExtractElement(const User &I) {
SDValue InVec = getValue(I.getOperand(0));
SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
TLI.getPointerTy(),
@ -2268,7 +2268,7 @@ static bool SequentialMask(SmallVectorImpl<int> &Mask, unsigned SIndx) {
return true;
}
void SelectionDAGBuilder::visitShuffleVector(User &I) {
void SelectionDAGBuilder::visitShuffleVector(const User &I) {
SmallVector<int, 8> Mask;
SDValue Src1 = getValue(I.getOperand(0));
SDValue Src2 = getValue(I.getOperand(1));
@ -2449,7 +2449,7 @@ void SelectionDAGBuilder::visitShuffleVector(User &I) {
VT, &Ops[0], Ops.size()));
}
void SelectionDAGBuilder::visitInsertValue(InsertValueInst &I) {
void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
const Value *Op0 = I.getOperand(0);
const Value *Op1 = I.getOperand(1);
const Type *AggTy = I.getType();
@ -2490,7 +2490,7 @@ void SelectionDAGBuilder::visitInsertValue(InsertValueInst &I) {
&Values[0], NumAggValues));
}
void SelectionDAGBuilder::visitExtractValue(ExtractValueInst &I) {
void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
const Value *Op0 = I.getOperand(0);
const Type *AggTy = Op0->getType();
const Type *ValTy = I.getType();
@ -2518,13 +2518,13 @@ void SelectionDAGBuilder::visitExtractValue(ExtractValueInst &I) {
&Values[0], NumValValues));
}
void SelectionDAGBuilder::visitGetElementPtr(User &I) {
void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
SDValue N = getValue(I.getOperand(0));
const Type *Ty = I.getOperand(0)->getType();
for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
for (GetElementPtrInst::const_op_iterator OI = I.op_begin()+1, E = I.op_end();
OI != E; ++OI) {
Value *Idx = *OI;
const Value *Idx = *OI;
if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
@ -2544,7 +2544,7 @@ void SelectionDAGBuilder::visitGetElementPtr(User &I) {
Ty = cast<SequentialType>(Ty)->getElementType();
// If this is a constant subscript, handle it quickly.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->getZExtValue() == 0) continue;
uint64_t Offs =
TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
@ -2595,7 +2595,7 @@ void SelectionDAGBuilder::visitGetElementPtr(User &I) {
setValue(&I, N);
}
void SelectionDAGBuilder::visitAlloca(AllocaInst &I) {
void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
// If this is a fixed sized alloca in the entry block of the function,
// allocate it statically on the stack.
if (FuncInfo.StaticAllocaMap.count(&I))
@ -2647,7 +2647,7 @@ void SelectionDAGBuilder::visitAlloca(AllocaInst &I) {
FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject();
}
void SelectionDAGBuilder::visitLoad(LoadInst &I) {
void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
const Value *SV = I.getOperand(0);
SDValue Ptr = getValue(SV);
@ -2707,9 +2707,9 @@ void SelectionDAGBuilder::visitLoad(LoadInst &I) {
&Values[0], NumValues));
}
void SelectionDAGBuilder::visitStore(StoreInst &I) {
Value *SrcV = I.getOperand(0);
Value *PtrV = I.getOperand(1);
void SelectionDAGBuilder::visitStore(const StoreInst &I) {
const Value *SrcV = I.getOperand(0);
const Value *PtrV = I.getOperand(1);
SmallVector<EVT, 4> ValueVTs;
SmallVector<uint64_t, 4> Offsets;
@ -2746,7 +2746,7 @@ void SelectionDAGBuilder::visitStore(StoreInst &I) {
/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
/// node.
void SelectionDAGBuilder::visitTargetIntrinsic(CallInst &I,
void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
unsigned Intrinsic) {
bool HasChain = !I.doesNotAccessMemory();
bool OnlyLoad = HasChain && I.onlyReadsMemory();
@ -2872,7 +2872,8 @@ getF32Constant(SelectionDAG &DAG, unsigned Flt) {
/// visitIntrinsicCall: I is a call instruction
/// Op is the associated NodeType for I
const char *
SelectionDAGBuilder::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
SelectionDAGBuilder::implVisitBinaryAtomic(const CallInst& I,
ISD::NodeType Op) {
SDValue Root = getRoot();
SDValue L =
DAG.getAtomic(Op, getCurDebugLoc(),
@ -2888,7 +2889,7 @@ SelectionDAGBuilder::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
// implVisitAluOverflow - Lower arithmetic overflow intrinsics.
const char *
SelectionDAGBuilder::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) {
SelectionDAGBuilder::implVisitAluOverflow(const CallInst &I, ISD::NodeType Op) {
SDValue Op1 = getValue(I.getOperand(1));
SDValue Op2 = getValue(I.getOperand(2));
@ -2900,7 +2901,7 @@ SelectionDAGBuilder::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) {
/// visitExp - Lower an exp intrinsic. Handles the special sequences for
/// limited-precision mode.
void
SelectionDAGBuilder::visitExp(CallInst &I) {
SelectionDAGBuilder::visitExp(const CallInst &I) {
SDValue result;
DebugLoc dl = getCurDebugLoc();
@ -3026,7 +3027,7 @@ SelectionDAGBuilder::visitExp(CallInst &I) {
/// visitLog - Lower a log intrinsic. Handles the special sequences for
/// limited-precision mode.
void
SelectionDAGBuilder::visitLog(CallInst &I) {
SelectionDAGBuilder::visitLog(const CallInst &I) {
SDValue result;
DebugLoc dl = getCurDebugLoc();
@ -3136,7 +3137,7 @@ SelectionDAGBuilder::visitLog(CallInst &I) {
/// visitLog2 - Lower a log2 intrinsic. Handles the special sequences for
/// limited-precision mode.
void
SelectionDAGBuilder::visitLog2(CallInst &I) {
SelectionDAGBuilder::visitLog2(const CallInst &I) {
SDValue result;
DebugLoc dl = getCurDebugLoc();
@ -3245,7 +3246,7 @@ SelectionDAGBuilder::visitLog2(CallInst &I) {
/// visitLog10 - Lower a log10 intrinsic. Handles the special sequences for
/// limited-precision mode.
void
SelectionDAGBuilder::visitLog10(CallInst &I) {
SelectionDAGBuilder::visitLog10(const CallInst &I) {
SDValue result;
DebugLoc dl = getCurDebugLoc();
@ -3347,7 +3348,7 @@ SelectionDAGBuilder::visitLog10(CallInst &I) {
/// visitExp2 - Lower an exp2 intrinsic. Handles the special sequences for
/// limited-precision mode.
void
SelectionDAGBuilder::visitExp2(CallInst &I) {
SelectionDAGBuilder::visitExp2(const CallInst &I) {
SDValue result;
DebugLoc dl = getCurDebugLoc();
@ -3461,9 +3462,9 @@ SelectionDAGBuilder::visitExp2(CallInst &I) {
/// visitPow - Lower a pow intrinsic. Handles the special sequences for
/// limited-precision mode with x == 10.0f.
void
SelectionDAGBuilder::visitPow(CallInst &I) {
SelectionDAGBuilder::visitPow(const CallInst &I) {
SDValue result;
Value *Val = I.getOperand(1);
const Value *Val = I.getOperand(1);
DebugLoc dl = getCurDebugLoc();
bool IsExp10 = false;
@ -3650,7 +3651,7 @@ static SDValue ExpandPowI(DebugLoc DL, SDValue LHS, SDValue RHS,
/// we want to emit this as a call to a named external function, return the name
/// otherwise lower it and return null.
const char *
SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
DebugLoc dl = getCurDebugLoc();
SDValue Res;
@ -3742,17 +3743,17 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
if (OptLevel != CodeGenOpt::None)
// FIXME: Variable debug info is not supported here.
return 0;
DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
if (!DIDescriptor::ValidDebugInfo(DI.getVariable(), CodeGenOpt::None))
return 0;
MDNode *Variable = DI.getVariable();
Value *Address = DI.getAddress();
const Value *Address = DI.getAddress();
if (!Address)
return 0;
if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
Address = BCI->getOperand(0);
AllocaInst *AI = dyn_cast<AllocaInst>(Address);
const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
// Don't handle byval struct arguments or VLAs, for example.
if (!AI)
return 0;
@ -3768,13 +3769,13 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
return 0;
}
case Intrinsic::dbg_value: {
DbgValueInst &DI = cast<DbgValueInst>(I);
const DbgValueInst &DI = cast<DbgValueInst>(I);
if (!DIDescriptor::ValidDebugInfo(DI.getVariable(), CodeGenOpt::None))
return 0;
MDNode *Variable = DI.getVariable();
uint64_t Offset = DI.getOffset();
Value *V = DI.getValue();
const Value *V = DI.getValue();
if (!V)
return 0;
@ -3800,9 +3801,9 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
}
// Build a debug info table entry.
if (BitCastInst *BCI = dyn_cast<BitCastInst>(V))
if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
V = BCI->getOperand(0);
AllocaInst *AI = dyn_cast<AllocaInst>(V);
const AllocaInst *AI = dyn_cast<AllocaInst>(V);
// Don't handle byval struct arguments or VLAs, for example.
if (!AI)
return 0;
@ -3922,7 +3923,7 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
case Intrinsic::convertuu: Code = ISD::CVT_UU; break;
}
EVT DestVT = TLI.getValueType(I.getType());
Value *Op1 = I.getOperand(1);
const Value *Op1 = I.getOperand(1);
Res = DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1),
DAG.getValueType(DestVT),
DAG.getValueType(getValue(Op1).getValueType()),
@ -4091,8 +4092,8 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::gcroot:
if (GFI) {
Value *Alloca = I.getOperand(1);
Constant *TypeMap = cast<Constant>(I.getOperand(2));
const Value *Alloca = I.getOperand(1);
const Constant *TypeMap = cast<Constant>(I.getOperand(2));
FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
GFI->addStackRoot(FI->getIndex(), TypeMap);
@ -4196,7 +4197,7 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
///
/// This function only tests target-independent requirements.
static bool
isInTailCallPosition(CallSite CS, Attributes CalleeRetAttr,
isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
const TargetLowering &TLI) {
const Instruction *I = CS.getInstruction();
const BasicBlock *ExitBB = I->getParent();
@ -4275,7 +4276,7 @@ isInTailCallPosition(CallSite CS, Attributes CalleeRetAttr,
return true;
}
void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee,
void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
bool isTailCall,
MachineBasicBlock *LandingPad) {
const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
@ -4323,7 +4324,7 @@ void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee,
RetTy = Type::getVoidTy(FTy->getContext());
}
for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
i != e; ++i) {
SDValue ArgNode = getValue(*i);
Entry.Node = ArgNode; Entry.Ty = (*i)->getType();
@ -4454,12 +4455,12 @@ void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee,
/// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
/// value is equal or not-equal to zero.
static bool IsOnlyUsedInZeroEqualityComparison(Value *V) {
for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
UI != E; ++UI) {
if (ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
if (const ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
if (IC->isEquality())
if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
if (const Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
if (C->isNullValue())
continue;
// Unknown instruction.
@ -4468,17 +4469,20 @@ static bool IsOnlyUsedInZeroEqualityComparison(Value *V) {
return true;
}
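
A minimal sketch of the const use-iteration pattern the helper above relies on; the function name and the equality-only check below are illustrative, not part of the patch.

#include "llvm/Instructions.h"
#include "llvm/Value.h"

using namespace llvm;

// Value::const_use_iterator walks the users of a const Value; dyn_cast
// preserves constness, so the matched instruction comes back const as well.
static bool allUsesAreEqualityICmps(const Value *V) {
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    const ICmpInst *IC = dyn_cast<ICmpInst>(*UI);
    if (!IC || !IC->isEquality())
      return false;
  }
  return true;
}
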
static SDValue getMemCmpLoad(Value *PtrVal, MVT LoadVT, const Type *LoadTy,
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
const Type *LoadTy,
SelectionDAGBuilder &Builder) {
// Check to see if this load can be trivially constant folded, e.g. if the
// input is from a string literal.
if (Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
// Cast pointer to the type we really want to load.
LoadInput = ConstantExpr::getBitCast(LoadInput,
LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
PointerType::getUnqual(LoadTy));
if (Constant *LoadCst = ConstantFoldLoadFromConstPtr(LoadInput, Builder.TD))
if (const Constant *LoadCst =
ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
Builder.TD))
return Builder.getValue(LoadCst);
}
@ -4511,18 +4515,18 @@ static SDValue getMemCmpLoad(Value *PtrVal, MVT LoadVT, const Type *LoadTy,
/// visitMemCmpCall - See if we can lower a call to memcmp in an optimized form.
/// If so, return true and lower it, otherwise return false and it will be
/// lowered like a normal call.
bool SelectionDAGBuilder::visitMemCmpCall(CallInst &I) {
bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
// Verify that the prototype makes sense. int memcmp(void*,void*,size_t)
if (I.getNumOperands() != 4)
return false;
Value *LHS = I.getOperand(1), *RHS = I.getOperand(2);
const Value *LHS = I.getOperand(1), *RHS = I.getOperand(2);
if (!LHS->getType()->isPointerTy() || !RHS->getType()->isPointerTy() ||
!I.getOperand(3)->getType()->isIntegerTy() ||
!I.getType()->isIntegerTy())
return false;
ConstantInt *Size = dyn_cast<ConstantInt>(I.getOperand(3));
const ConstantInt *Size = dyn_cast<ConstantInt>(I.getOperand(3));
// memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0
// memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0
@ -4588,7 +4592,7 @@ bool SelectionDAGBuilder::visitMemCmpCall(CallInst &I) {
}
void SelectionDAGBuilder::visitCall(CallInst &I) {
void SelectionDAGBuilder::visitCall(const CallInst &I) {
const char *RenameFn = 0;
if (Function *F = I.getCalledFunction()) {
if (F->isDeclaration()) {
@ -5160,8 +5164,8 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
/// visitInlineAsm - Handle a call to an InlineAsm object.
///
void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
/// ConstraintOperands - Information about all of the constraints.
std::vector<SDISelAsmOperandInfo> ConstraintOperands;
@ -5197,7 +5201,7 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
case InlineAsm::isOutput:
// Indirect outputs just consume an argument.
if (OpInfo.isIndirect) {
OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
break;
}
@ -5214,7 +5218,7 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
++ResNo;
break;
case InlineAsm::isInput:
OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
break;
case InlineAsm::isClobber:
// Nothing to do.
@ -5227,7 +5231,7 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
// Strip bitcasts, if any. This mostly comes up for functions.
OpInfo.CallOperandVal = OpInfo.CallOperandVal->stripPointerCasts();
if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
} else {
OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
@ -5280,7 +5284,7 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
// If the operand is a float, integer, or vector constant, spill to a
// constant pool entry to get its address.
Value *OpVal = OpInfo.CallOperandVal;
const Value *OpVal = OpInfo.CallOperandVal;
if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
isa<ConstantVector>(OpVal)) {
OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
@ -5572,17 +5576,16 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
return;
}
std::vector<std::pair<SDValue, Value*> > StoresToEmit;
std::vector<std::pair<SDValue, const Value *> > StoresToEmit;
// Process indirect outputs, first output all of the flagged copies out of
// physregs.
for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
Value *Ptr = IndirectStoresToEmit[i].second;
const Value *Ptr = IndirectStoresToEmit[i].second;
SDValue OutVal = OutRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
Chain, &Flag);
StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
}
// Emit the non-flagged stores from the physregs.
@ -5603,14 +5606,14 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) {
DAG.setRoot(Chain);
}
void SelectionDAGBuilder::visitVAStart(CallInst &I) {
void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
MVT::Other, getRoot(),
getValue(I.getOperand(1)),
DAG.getSrcValue(I.getOperand(1))));
}
void SelectionDAGBuilder::visitVAArg(VAArgInst &I) {
void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
getRoot(), getValue(I.getOperand(0)),
DAG.getSrcValue(I.getOperand(0)));
@ -5618,14 +5621,14 @@ void SelectionDAGBuilder::visitVAArg(VAArgInst &I) {
DAG.setRoot(V.getValue(1));
}
void SelectionDAGBuilder::visitVAEnd(CallInst &I) {
void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
MVT::Other, getRoot(),
getValue(I.getOperand(1)),
DAG.getSrcValue(I.getOperand(1))));
}
void SelectionDAGBuilder::visitVACopy(CallInst &I) {
void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
MVT::Other, getRoot(),
getValue(I.getOperand(1)),
@ -5807,7 +5810,8 @@ SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
return SDValue();
}
void SelectionDAGBuilder::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
void
SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
SDValue Op = getValue(V);
assert((Op.getOpcode() != ISD::CopyFromReg ||
cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
@ -5822,9 +5826,9 @@ void SelectionDAGBuilder::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
#include "llvm/CodeGen/SelectionDAGISel.h"
void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) {
void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
// If this is the entry block, emit arguments.
Function &F = *LLVMBB->getParent();
const Function &F = *LLVMBB->getParent();
SelectionDAG &DAG = SDB->DAG;
SDValue OldRoot = DAG.getRoot();
DebugLoc dl = SDB->getCurDebugLoc();
@ -5856,7 +5860,7 @@ void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) {
// Set up the incoming argument description vector.
unsigned Idx = 1;
for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
I != E; ++I, ++Idx) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, I->getType(), ValueVTs);
@ -5958,7 +5962,7 @@ void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) {
++i;
}
for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
++I, ++Idx) {
SmallVector<SDValue, 4> ArgValues;
SmallVector<EVT, 4> ValueVTs;
@ -6012,15 +6016,15 @@ void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) {
/// the end.
///
void
SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
TerminatorInst *TI = LLVMBB->getTerminator();
SelectionDAGISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
const TerminatorInst *TI = LLVMBB->getTerminator();
SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
// Check successor nodes' PHI nodes that expect a constant to be available
// from this block.
for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
BasicBlock *SuccBB = TI->getSuccessor(succ);
const BasicBlock *SuccBB = TI->getSuccessor(succ);
if (!isa<PHINode>(SuccBB->begin())) continue;
MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
@ -6029,20 +6033,19 @@ SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
if (!SuccsHandled.insert(SuccMBB)) continue;
MachineBasicBlock::iterator MBBI = SuccMBB->begin();
PHINode *PN;
// At this point we know that there is a 1-1 correspondence between LLVM PHI
// nodes and Machine PHI nodes, but the incoming operands have not been
// emitted yet.
for (BasicBlock::iterator I = SuccBB->begin();
(PN = dyn_cast<PHINode>(I)); ++I) {
for (BasicBlock::const_iterator I = SuccBB->begin();
const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
// Ignore dead phi's.
if (PN->use_empty()) continue;
unsigned Reg;
Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
if (Constant *C = dyn_cast<Constant>(PHIOp)) {
if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
unsigned &RegOut = SDB->ConstantsOut[C];
if (RegOut == 0) {
RegOut = FuncInfo->CreateRegForValue(C);
@ -6081,9 +6084,9 @@ SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
/// creating SelectionDAG nodes.
///
bool
SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(const BasicBlock *LLVMBB,
FastISel *F) {
TerminatorInst *TI = LLVMBB->getTerminator();
const TerminatorInst *TI = LLVMBB->getTerminator();
SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
unsigned OrigNumPHINodesToUpdate = SDB->PHINodesToUpdate.size();
@ -6091,7 +6094,7 @@ SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
// Check successor nodes' PHI nodes that expect a constant to be available
// from this block.
for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
BasicBlock *SuccBB = TI->getSuccessor(succ);
const BasicBlock *SuccBB = TI->getSuccessor(succ);
if (!isa<PHINode>(SuccBB->begin())) continue;
MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
@ -6100,13 +6103,12 @@ SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
if (!SuccsHandled.insert(SuccMBB)) continue;
MachineBasicBlock::iterator MBBI = SuccMBB->begin();
PHINode *PN;
// At this point we know that there is a 1-1 correspondence between LLVM PHI
// nodes and Machine PHI nodes, but the incoming operands have not been
// emitted yet.
for (BasicBlock::iterator I = SuccBB->begin();
(PN = dyn_cast<PHINode>(I)); ++I) {
for (BasicBlock::const_iterator I = SuccBB->begin();
const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
// Ignore dead phi's.
if (PN->use_empty()) continue;
@ -6127,7 +6129,7 @@ SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
}
}
Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
unsigned Reg = F->getRegForValue(PHIOp);
if (Reg == 0) {


@ -142,15 +142,16 @@ private:
/// CaseRec - A struct with ctor used in lowering switches to a binary tree
/// of conditional branches.
struct CaseRec {
CaseRec(MachineBasicBlock *bb, Constant *lt, Constant *ge, CaseRange r) :
CaseRec(MachineBasicBlock *bb, const Constant *lt, const Constant *ge,
CaseRange r) :
CaseBB(bb), LT(lt), GE(ge), Range(r) {}
/// CaseBB - The MBB in which to emit the compare and branch
MachineBasicBlock *CaseBB;
/// LT, GE - If nonzero, we know the current case value must be less-than or
/// greater-than-or-equal-to these Constants.
Constant *LT;
Constant *GE;
const Constant *LT;
const Constant *GE;
/// Range - A pair of iterators representing the range of case values to be
/// processed at this point in the binary search tree.
CaseRange Range;
@ -181,7 +182,8 @@ private:
/// SelectionDAGBuilder and SDISel for the code generation of additional basic
/// blocks needed by multi-case switch statements.
struct CaseBlock {
CaseBlock(ISD::CondCode cc, Value *cmplhs, Value *cmprhs, Value *cmpmiddle,
CaseBlock(ISD::CondCode cc, const Value *cmplhs, const Value *cmprhs,
const Value *cmpmiddle,
MachineBasicBlock *truebb, MachineBasicBlock *falsebb,
MachineBasicBlock *me)
: CC(cc), CmpLHS(cmplhs), CmpMHS(cmpmiddle), CmpRHS(cmprhs),
@ -191,7 +193,7 @@ private:
// CmpLHS/CmpRHS/CmpMHS - The LHS/MHS/RHS of the comparison to emit.
// Emit by default LHS op RHS. MHS is used for range comparisons:
// If MHS is not null: (LHS <= MHS) and (MHS <= RHS).
Value *CmpLHS, *CmpMHS, *CmpRHS;
const Value *CmpLHS, *CmpMHS, *CmpRHS;
// TrueBB/FalseBB - the block to branch to if the setcc is true/false.
MachineBasicBlock *TrueBB, *FalseBB;
// ThisBB - the block into which to emit the code for the setcc and branches
@ -213,12 +215,12 @@ private:
MachineBasicBlock *Default;
};
struct JumpTableHeader {
JumpTableHeader(APInt F, APInt L, Value *SV, MachineBasicBlock *H,
JumpTableHeader(APInt F, APInt L, const Value *SV, MachineBasicBlock *H,
bool E = false):
First(F), Last(L), SValue(SV), HeaderBB(H), Emitted(E) {}
APInt First;
APInt Last;
Value *SValue;
const Value *SValue;
MachineBasicBlock *HeaderBB;
bool Emitted;
};
@ -235,7 +237,7 @@ private:
typedef SmallVector<BitTestCase, 3> BitTestInfo;
struct BitTestBlock {
BitTestBlock(APInt F, APInt R, Value* SV,
BitTestBlock(APInt F, APInt R, const Value* SV,
unsigned Rg, bool E,
MachineBasicBlock* P, MachineBasicBlock* D,
const BitTestInfo& C):
@ -243,7 +245,7 @@ private:
Parent(P), Default(D), Cases(C) { }
APInt First;
APInt Range;
Value *SValue;
const Value *SValue;
unsigned Reg;
bool Emitted;
MachineBasicBlock *Parent;
@ -280,7 +282,7 @@ public:
// Emit PHI-node-operand constants only once even if used by multiple
// PHI nodes.
DenseMap<Constant*, unsigned> ConstantsOut;
DenseMap<const Constant *, unsigned> ConstantsOut;
/// FuncInfo - Information about the function as a whole.
///
@ -336,16 +338,16 @@ public:
unsigned getSDNodeOrder() const { return SDNodeOrder; }
void CopyValueToVirtualRegister(Value *V, unsigned Reg);
void CopyValueToVirtualRegister(const Value *V, unsigned Reg);
/// AssignOrderingToNode - Assign an ordering to the node. The order is gotten
/// from how the code appeared in the source. The ordering is used by the
/// scheduler to effectively turn off scheduling.
void AssignOrderingToNode(const SDNode *Node);
void visit(Instruction &I);
void visit(const Instruction &I);
void visit(unsigned Opcode, User &I);
void visit(unsigned Opcode, const User &I);
void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }
@ -361,43 +363,43 @@ public:
std::set<unsigned> &OutputRegs,
std::set<unsigned> &InputRegs);
void FindMergedConditions(Value *Cond, MachineBasicBlock *TBB,
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
unsigned Opc);
void EmitBranchForMergedCondition(Value *Cond, MachineBasicBlock *TBB,
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
MachineBasicBlock *CurBB);
bool ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases);
bool isExportableFromCurrentBlock(Value *V, const BasicBlock *FromBB);
void CopyToExportRegsIfNeeded(Value *V);
void ExportFromCurrentBlock(Value *V);
void LowerCallTo(CallSite CS, SDValue Callee, bool IsTailCall,
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB);
void CopyToExportRegsIfNeeded(const Value *V);
void ExportFromCurrentBlock(const Value *V);
void LowerCallTo(ImmutableCallSite CS, SDValue Callee, bool IsTailCall,
MachineBasicBlock *LandingPad = NULL);
private:
// Terminator instructions.
void visitRet(ReturnInst &I);
void visitBr(BranchInst &I);
void visitSwitch(SwitchInst &I);
void visitIndirectBr(IndirectBrInst &I);
void visitUnreachable(UnreachableInst &I) { /* noop */ }
void visitRet(const ReturnInst &I);
void visitBr(const BranchInst &I);
void visitSwitch(const SwitchInst &I);
void visitIndirectBr(const IndirectBrInst &I);
void visitUnreachable(const UnreachableInst &I) { /* noop */ }
// Helpers for visitSwitch
bool handleSmallSwitchRange(CaseRec& CR,
CaseRecVector& WorkList,
Value* SV,
const Value* SV,
MachineBasicBlock* Default);
bool handleJTSwitchCase(CaseRec& CR,
CaseRecVector& WorkList,
Value* SV,
const Value* SV,
MachineBasicBlock* Default);
bool handleBTSplitSwitchCase(CaseRec& CR,
CaseRecVector& WorkList,
Value* SV,
const Value* SV,
MachineBasicBlock* Default);
bool handleBitTestsSwitchCase(CaseRec& CR,
CaseRecVector& WorkList,
Value* SV,
const Value* SV,
MachineBasicBlock* Default);
public:
void visitSwitchCase(CaseBlock &CB);
@ -410,87 +412,87 @@ public:
private:
// These all get lowered before this pass.
void visitInvoke(InvokeInst &I);
void visitUnwind(UnwindInst &I);
void visitInvoke(const InvokeInst &I);
void visitUnwind(const UnwindInst &I);
void visitBinary(User &I, unsigned OpCode);
void visitShift(User &I, unsigned Opcode);
void visitAdd(User &I) { visitBinary(I, ISD::ADD); }
void visitFAdd(User &I) { visitBinary(I, ISD::FADD); }
void visitSub(User &I) { visitBinary(I, ISD::SUB); }
void visitFSub(User &I);
void visitMul(User &I) { visitBinary(I, ISD::MUL); }
void visitFMul(User &I) { visitBinary(I, ISD::FMUL); }
void visitURem(User &I) { visitBinary(I, ISD::UREM); }
void visitSRem(User &I) { visitBinary(I, ISD::SREM); }
void visitFRem(User &I) { visitBinary(I, ISD::FREM); }
void visitUDiv(User &I) { visitBinary(I, ISD::UDIV); }
void visitSDiv(User &I) { visitBinary(I, ISD::SDIV); }
void visitFDiv(User &I) { visitBinary(I, ISD::FDIV); }
void visitAnd (User &I) { visitBinary(I, ISD::AND); }
void visitOr (User &I) { visitBinary(I, ISD::OR); }
void visitXor (User &I) { visitBinary(I, ISD::XOR); }
void visitShl (User &I) { visitShift(I, ISD::SHL); }
void visitLShr(User &I) { visitShift(I, ISD::SRL); }
void visitAShr(User &I) { visitShift(I, ISD::SRA); }
void visitICmp(User &I);
void visitFCmp(User &I);
void visitBinary(const User &I, unsigned OpCode);
void visitShift(const User &I, unsigned Opcode);
void visitAdd(const User &I) { visitBinary(I, ISD::ADD); }
void visitFAdd(const User &I) { visitBinary(I, ISD::FADD); }
void visitSub(const User &I) { visitBinary(I, ISD::SUB); }
void visitFSub(const User &I);
void visitMul(const User &I) { visitBinary(I, ISD::MUL); }
void visitFMul(const User &I) { visitBinary(I, ISD::FMUL); }
void visitURem(const User &I) { visitBinary(I, ISD::UREM); }
void visitSRem(const User &I) { visitBinary(I, ISD::SREM); }
void visitFRem(const User &I) { visitBinary(I, ISD::FREM); }
void visitUDiv(const User &I) { visitBinary(I, ISD::UDIV); }
void visitSDiv(const User &I) { visitBinary(I, ISD::SDIV); }
void visitFDiv(const User &I) { visitBinary(I, ISD::FDIV); }
void visitAnd (const User &I) { visitBinary(I, ISD::AND); }
void visitOr (const User &I) { visitBinary(I, ISD::OR); }
void visitXor (const User &I) { visitBinary(I, ISD::XOR); }
void visitShl (const User &I) { visitShift(I, ISD::SHL); }
void visitLShr(const User &I) { visitShift(I, ISD::SRL); }
void visitAShr(const User &I) { visitShift(I, ISD::SRA); }
void visitICmp(const User &I);
void visitFCmp(const User &I);
// Visit the conversion instructions
void visitTrunc(User &I);
void visitZExt(User &I);
void visitSExt(User &I);
void visitFPTrunc(User &I);
void visitFPExt(User &I);
void visitFPToUI(User &I);
void visitFPToSI(User &I);
void visitUIToFP(User &I);
void visitSIToFP(User &I);
void visitPtrToInt(User &I);
void visitIntToPtr(User &I);
void visitBitCast(User &I);
void visitTrunc(const User &I);
void visitZExt(const User &I);
void visitSExt(const User &I);
void visitFPTrunc(const User &I);
void visitFPExt(const User &I);
void visitFPToUI(const User &I);
void visitFPToSI(const User &I);
void visitUIToFP(const User &I);
void visitSIToFP(const User &I);
void visitPtrToInt(const User &I);
void visitIntToPtr(const User &I);
void visitBitCast(const User &I);
void visitExtractElement(User &I);
void visitInsertElement(User &I);
void visitShuffleVector(User &I);
void visitExtractElement(const User &I);
void visitInsertElement(const User &I);
void visitShuffleVector(const User &I);
void visitExtractValue(ExtractValueInst &I);
void visitInsertValue(InsertValueInst &I);
void visitExtractValue(const ExtractValueInst &I);
void visitInsertValue(const InsertValueInst &I);
void visitGetElementPtr(User &I);
void visitSelect(User &I);
void visitGetElementPtr(const User &I);
void visitSelect(const User &I);
void visitAlloca(AllocaInst &I);
void visitLoad(LoadInst &I);
void visitStore(StoreInst &I);
void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
void visitCall(CallInst &I);
bool visitMemCmpCall(CallInst &I);
void visitAlloca(const AllocaInst &I);
void visitLoad(const LoadInst &I);
void visitStore(const StoreInst &I);
void visitPHI(const PHINode &I) { } // PHI nodes are handled specially.
void visitCall(const CallInst &I);
bool visitMemCmpCall(const CallInst &I);
void visitInlineAsm(CallSite CS);
const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);
void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic);
void visitInlineAsm(ImmutableCallSite CS);
const char *visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);
void visitPow(CallInst &I);
void visitExp2(CallInst &I);
void visitExp(CallInst &I);
void visitLog(CallInst &I);
void visitLog2(CallInst &I);
void visitLog10(CallInst &I);
void visitPow(const CallInst &I);
void visitExp2(const CallInst &I);
void visitExp(const CallInst &I);
void visitLog(const CallInst &I);
void visitLog2(const CallInst &I);
void visitLog10(const CallInst &I);
void visitVAStart(CallInst &I);
void visitVAArg(VAArgInst &I);
void visitVAEnd(CallInst &I);
void visitVACopy(CallInst &I);
void visitVAStart(const CallInst &I);
void visitVAArg(const VAArgInst &I);
void visitVAEnd(const CallInst &I);
void visitVACopy(const CallInst &I);
void visitUserOp1(Instruction &I) {
void visitUserOp1(const Instruction &I) {
llvm_unreachable("UserOp1 should not exist at instruction selection time!");
}
void visitUserOp2(Instruction &I) {
void visitUserOp2(const Instruction &I) {
llvm_unreachable("UserOp2 should not exist at instruction selection time!");
}
const char *implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op);
const char *implVisitAluOverflow(CallInst &I, ISD::NodeType Op);
const char *implVisitBinaryAtomic(const CallInst& I, ISD::NodeType Op);
const char *implVisitAluOverflow(const CallInst &I, ISD::NodeType Op);
};
} // end namespace llvm
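
A usage sketch for the ImmutableCallSite-based interfaces declared above, assuming only the CallSite API visible in this patch; the helper and its pointer-argument count are invented for illustration.

#include "llvm/Instructions.h"
#include "llvm/Support/CallSite.h"

using namespace llvm;

// ImmutableCallSite wraps a const call (or invoke) and exposes the same
// argument iteration as CallSite, but with read-only access.
static unsigned countPointerArgs(const CallInst &I) {
  ImmutableCallSite CS(&I);
  unsigned NumPtrArgs = 0;
  for (ImmutableCallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
       AI != AE; ++AI)
    if ((*AI)->getType()->isPointerTy())
      ++NumPtrArgs;
  return NumPtrArgs;
}
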


@ -226,7 +226,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
/// SetDebugLoc - Update MF's and SDB's DebugLocs if debug information is
/// attached with this instruction.
static void SetDebugLoc(Instruction *I, SelectionDAGBuilder *SDB,
static void SetDebugLoc(const Instruction *I, SelectionDAGBuilder *SDB,
FastISel *FastIS, MachineFunction *MF) {
DebugLoc DL = I->getDebugLoc();
if (DL.isUnknown()) return;
@ -249,15 +249,16 @@ static void ResetDebugLoc(SelectionDAGBuilder *SDB, FastISel *FastIS) {
FastIS->setCurDebugLoc(DebugLoc());
}
void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB,
BasicBlock::iterator Begin,
BasicBlock::iterator End,
void SelectionDAGISel::SelectBasicBlock(const BasicBlock *LLVMBB,
BasicBlock::const_iterator Begin,
BasicBlock::const_iterator End,
bool &HadTailCall) {
SDB->setCurrentBasicBlock(BB);
// Lower all of the non-terminator instructions. If a call is emitted
// as a tail call, cease emitting nodes for this block.
for (BasicBlock::iterator I = Begin; I != End && !SDB->HasTailCall; ++I) {
for (BasicBlock::const_iterator I = Begin;
I != End && !SDB->HasTailCall; ++I) {
SetDebugLoc(I, SDB, 0, MF);
// Visit the instruction. Terminators are handled below.
@ -270,7 +271,7 @@ void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB,
if (!SDB->HasTailCall) {
// Ensure that all instructions which are used outside of their defining
// blocks are available as virtual registers. Invoke is handled elsewhere.
for (BasicBlock::iterator I = Begin; I != End; ++I)
for (BasicBlock::const_iterator I = Begin; I != End; ++I)
if (!isa<PHINode>(I) && !isa<InvokeInst>(I))
SDB->CopyToExportRegsIfNeeded(I);
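
As a standalone sketch of the const traversal used here (the counting helper is invented and assumes nothing beyond the standard Function/BasicBlock const_iterators):

#include "llvm/BasicBlock.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"

using namespace llvm;

// With a const Function, traversal switches to the const_iterator typedefs,
// and everything reached through them stays const.
static unsigned countNonPHIInstructions(const Function &Fn) {
  unsigned Count = 0;
  for (Function::const_iterator BB = Fn.begin(), BE = Fn.end(); BB != BE; ++BB)
    for (BasicBlock::const_iterator I = BB->begin(), IE = BB->end();
         I != IE; ++I)
      if (!isa<PHINode>(I))
        ++Count;
  return Count;
}
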
@ -744,7 +745,7 @@ void SelectionDAGISel::PrepareEHLandingPad(MachineBasicBlock *BB) {
}
}
void SelectionDAGISel::SelectAllBasicBlocks(Function &Fn) {
void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Initialize the Fast-ISel state, if needed.
FastISel *FastIS = 0;
if (EnableFastISel)
@ -756,13 +757,13 @@ void SelectionDAGISel::SelectAllBasicBlocks(Function &Fn) {
);
// Iterate over all basic blocks in the function.
for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
BasicBlock *LLVMBB = &*I;
for (Function::const_iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) {
const BasicBlock *LLVMBB = &*I;
BB = FuncInfo->MBBMap[LLVMBB];
BasicBlock::iterator const Begin = LLVMBB->begin();
BasicBlock::iterator const End = LLVMBB->end();
BasicBlock::iterator BI = Begin;
BasicBlock::const_iterator const Begin = LLVMBB->begin();
BasicBlock::const_iterator const End = LLVMBB->end();
BasicBlock::const_iterator BI = Begin;
// Lower any arguments needed in this block if this is the entry block.
bool SuppressFastISel = false;
@ -773,7 +774,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(Function &Fn) {
// fast-isel in the entry block.
if (FastIS) {
unsigned j = 1;
for (Function::arg_iterator I = Fn.arg_begin(), E = Fn.arg_end();
for (Function::const_arg_iterator I = Fn.arg_begin(), E = Fn.arg_end();
I != E; ++I, ++j)
if (Fn.paramHasAttr(j, Attribute::ByVal)) {
if (EnableFastISelVerbose || EnableFastISelAbort)


@ -2245,7 +2245,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
bool TargetLowering::isGAPlusOffset(SDNode *N, GlobalValue* &GA,
bool TargetLowering::isGAPlusOffset(SDNode *N, const GlobalValue* &GA,
int64_t &Offset) const {
if (isa<GlobalAddressSDNode>(N)) {
GlobalAddressSDNode *GASD = cast<GlobalAddressSDNode>(N);


@ -52,7 +52,7 @@ unsigned char* JITDwarfEmitter::EmitDwarfTable(MachineFunction& F,
unsigned char* Result = 0;
const std::vector<Function *> Personalities = MMI->getPersonalities();
const std::vector<const Function *> Personalities = MMI->getPersonalities();
EHFramePtr = EmitCommonEHFrame(Personalities[MMI->getPersonalityIndex()]);
Result = EmitEHFrame(Personalities[MMI->getPersonalityIndex()], EHFramePtr,
@ -201,7 +201,7 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
// Map all labels and get rid of any dead landing pads.
MMI->TidyLandingPads();
const std::vector<GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
const std::vector<const GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
if (PadInfos.empty()) return 0;
@ -450,7 +450,7 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
// Emit the type ids.
for (unsigned M = TypeInfos.size(); M; --M) {
GlobalVariable *GV = TypeInfos[M - 1];
const GlobalVariable *GV = TypeInfos[M - 1];
if (GV) {
if (TD->getPointerSize() == sizeof(int32_t))
@ -609,7 +609,7 @@ unsigned JITDwarfEmitter::GetDwarfTableSizeInBytes(MachineFunction& F,
FinalSize += GetExceptionTableSizeInBytes(&F);
const std::vector<Function *> Personalities = MMI->getPersonalities();
const std::vector<const Function *> Personalities = MMI->getPersonalities();
FinalSize +=
GetCommonEHFrameSizeInBytes(Personalities[MMI->getPersonalityIndex()]);
@ -782,7 +782,7 @@ JITDwarfEmitter::GetExceptionTableSizeInBytes(MachineFunction* MF) const {
// Map all labels and get rid of any dead landing pads.
MMI->TidyLandingPads();
const std::vector<GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
const std::vector<const GlobalVariable *> &TypeInfos = MMI->getTypeInfos();
const std::vector<unsigned> &FilterIds = MMI->getFilterIds();
const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
if (PadInfos.empty()) return 0;
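
The personality and type-info tables change element type (rather than adding const only at the uses) because a std::vector<Function*> does not convert to a std::vector<const Function*>; a minimal LLVM-independent illustration, with invented names:

#include <vector>

class Function;  // stand-in; only pointers are involved

static void useTable(const std::vector<const Function *> &Table) {
  (void)Table;  // read-only consumer
}

static void demo(std::vector<Function *> &Personalities) {
  // useTable(Personalities);      // would not compile: unrelated vector types
  std::vector<const Function *> Copy(Personalities.begin(),
                                     Personalities.end());  // per-element conversion is fine
  useTable(Copy);
}
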


@ -995,12 +995,13 @@ unsigned JITEmitter::GetSizeOfGlobalsInBytes(MachineFunction &MF) {
for (unsigned CurOp = 0; CurOp < NumOps; CurOp++) {
const MachineOperand &MO = MI.getOperand(CurOp);
if (MO.isGlobal()) {
GlobalValue* V = MO.getGlobal();
const GlobalValue* V = MO.getGlobal();
const GlobalVariable *GV = dyn_cast<const GlobalVariable>(V);
if (!GV)
continue;
// If seen in previous function, it will have an entry here.
if (TheJIT->getPointerToGlobalIfAvailable(GV))
if (TheJIT->getPointerToGlobalIfAvailable(
const_cast<GlobalVariable *>(GV)))
continue;
// If seen earlier in this function, it will have an entry here.
// FIXME: it should be possible to combine these tables, by


@ -1050,7 +1050,7 @@ emitLoadConstPool(MachineBasicBlock &MBB,
unsigned PredReg) const {
MachineFunction &MF = *MBB.getParent();
MachineConstantPool *ConstantPool = MF.getConstantPool();
Constant *C =
const Constant *C =
ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);


@ -150,7 +150,7 @@ namespace {
/// Routines that handle operands which add machine relocations which are
/// fixed up by the relocation stage.
void emitGlobalAddress(GlobalValue *GV, unsigned Reloc,
void emitGlobalAddress(const GlobalValue *GV, unsigned Reloc,
bool MayNeedFarStub, bool Indirect,
intptr_t ACPV = 0);
void emitExternalSymbolAddress(const char *ES, unsigned Reloc);
@ -249,14 +249,16 @@ unsigned ARMCodeEmitter::getMachineOpValue(const MachineInstr &MI,
/// emitGlobalAddress - Emit the specified address to the code stream.
///
void ARMCodeEmitter::emitGlobalAddress(GlobalValue *GV, unsigned Reloc,
void ARMCodeEmitter::emitGlobalAddress(const GlobalValue *GV, unsigned Reloc,
bool MayNeedFarStub, bool Indirect,
intptr_t ACPV) {
MachineRelocation MR = Indirect
? MachineRelocation::getIndirectSymbol(MCE.getCurrentPCOffset(), Reloc,
GV, ACPV, MayNeedFarStub)
const_cast<GlobalValue *>(GV),
ACPV, MayNeedFarStub)
: MachineRelocation::getGV(MCE.getCurrentPCOffset(), Reloc,
GV, ACPV, MayNeedFarStub);
const_cast<GlobalValue *>(GV), ACPV,
MayNeedFarStub);
MCE.addRelocation(MR);
}
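
Where the relocation API still takes a non-const GlobalValue* (this patch leaves MachineRelocation::getGV unchanged), the emitters bridge with const_cast; a hedged sketch of that call, with an invented wrapper name:

#include <stdint.h>

#include "llvm/Function.h"
#include "llvm/CodeGen/MachineRelocation.h"

using namespace llvm;

// The relocation machinery only records GV, so casting away const at this
// boundary does not lead to modification of the IR object.
static MachineRelocation makeGVRelocation(uintptr_t PCOffset, unsigned Reloc,
                                          const GlobalValue *GV) {
  return MachineRelocation::getGV(PCOffset, Reloc,
                                  const_cast<GlobalValue *>(GV), /*cst=*/0,
                                  isa<Function>(GV));
}
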
@ -391,7 +393,7 @@ void ARMCodeEmitter::emitConstPoolInstruction(const MachineInstr &MI) {
<< (void*)MCE.getCurrentPCValue() << " " << *ACPV << '\n');
assert(ACPV->isGlobalValue() && "unsupported constant pool value");
GlobalValue *GV = ACPV->getGV();
const GlobalValue *GV = ACPV->getGV();
if (GV) {
Reloc::Model RelocM = TM.getRelocationModel();
emitGlobalAddress(GV, ARM::reloc_arm_machine_cp_entry,
@ -403,7 +405,7 @@ void ARMCodeEmitter::emitConstPoolInstruction(const MachineInstr &MI) {
}
emitWordLE(0);
} else {
Constant *CV = MCPE.Val.ConstVal;
const Constant *CV = MCPE.Val.ConstVal;
DEBUG({
errs() << " ** Constant pool #" << CPI << " @ "
@ -415,7 +417,7 @@ void ARMCodeEmitter::emitConstPoolInstruction(const MachineInstr &MI) {
errs() << '\n';
});
if (GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
emitGlobalAddress(GV, ARM::reloc_arm_absolute, isa<Function>(GV), false);
emitWordLE(0);
} else if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {


@ -21,7 +21,7 @@
#include <cstdlib>
using namespace llvm;
ARMConstantPoolValue::ARMConstantPoolValue(Constant *cval, unsigned id,
ARMConstantPoolValue::ARMConstantPoolValue(const Constant *cval, unsigned id,
ARMCP::ARMCPKind K,
unsigned char PCAdj,
const char *Modif,
@ -39,16 +39,17 @@ ARMConstantPoolValue::ARMConstantPoolValue(LLVMContext &C,
CVal(NULL), S(strdup(s)), LabelId(id), Kind(ARMCP::CPExtSymbol),
PCAdjust(PCAdj), Modifier(Modif), AddCurrentAddress(AddCA) {}
ARMConstantPoolValue::ARMConstantPoolValue(GlobalValue *gv, const char *Modif)
ARMConstantPoolValue::ARMConstantPoolValue(const GlobalValue *gv,
const char *Modif)
: MachineConstantPoolValue((const Type*)Type::getInt32Ty(gv->getContext())),
CVal(gv), S(NULL), LabelId(0), Kind(ARMCP::CPValue), PCAdjust(0),
Modifier(Modif) {}
GlobalValue *ARMConstantPoolValue::getGV() const {
const GlobalValue *ARMConstantPoolValue::getGV() const {
return dyn_cast_or_null<GlobalValue>(CVal);
}
BlockAddress *ARMConstantPoolValue::getBlockAddress() const {
const BlockAddress *ARMConstantPoolValue::getBlockAddress() const {
return dyn_cast_or_null<BlockAddress>(CVal);
}


@ -36,7 +36,7 @@ namespace ARMCP {
/// represent PC-relative displacement between the address of the load
/// instruction and the constant being loaded, i.e. (&GV-(LPIC+8)).
class ARMConstantPoolValue : public MachineConstantPoolValue {
Constant *CVal; // Constant being loaded.
const Constant *CVal; // Constant being loaded.
const char *S; // ExtSymbol being loaded.
unsigned LabelId; // Label id of the load.
ARMCP::ARMCPKind Kind; // Kind of constant.
@ -46,20 +46,20 @@ class ARMConstantPoolValue : public MachineConstantPoolValue {
bool AddCurrentAddress;
public:
ARMConstantPoolValue(Constant *cval, unsigned id,
ARMConstantPoolValue(const Constant *cval, unsigned id,
ARMCP::ARMCPKind Kind = ARMCP::CPValue,
unsigned char PCAdj = 0, const char *Modifier = NULL,
bool AddCurrentAddress = false);
ARMConstantPoolValue(LLVMContext &C, const char *s, unsigned id,
unsigned char PCAdj = 0, const char *Modifier = NULL,
bool AddCurrentAddress = false);
ARMConstantPoolValue(GlobalValue *GV, const char *Modifier);
ARMConstantPoolValue(const GlobalValue *GV, const char *Modifier);
ARMConstantPoolValue();
~ARMConstantPoolValue();
GlobalValue *getGV() const;
const GlobalValue *getGV() const;
const char *getSymbol() const { return S; }
BlockAddress *getBlockAddress() const;
const BlockAddress *getBlockAddress() const;
const char *getModifier() const { return Modifier; }
bool hasModifier() const { return Modifier != NULL; }
bool mustAddCurrentAddress() const { return AddCurrentAddress; }


@ -91,7 +91,7 @@ bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
LO16 = LO16.addImm(Lo16);
HI16 = HI16.addImm(Hi16);
} else {
GlobalValue *GV = MO.getGlobal();
const GlobalValue *GV = MO.getGlobal();
unsigned TF = MO.getTargetFlags();
LO16 = LO16.addGlobalAddress(GV, MO.getOffset(), TF | ARMII::MO_LO16);
HI16 = HI16.addGlobalAddress(GV, MO.getOffset(), TF | ARMII::MO_HI16);


@ -1070,7 +1070,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
false, false, 0);
}
} else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
GlobalValue *GV = G->getGlobal();
const GlobalValue *GV = G->getGlobal();
isDirect = true;
bool isExt = GV->isDeclaration() || GV->isWeakForLinker();
bool isStub = (isExt && Subtarget->isTargetDarwin()) &&
@ -1282,7 +1282,7 @@ SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) {
unsigned ARMPCLabelIndex = 0;
DebugLoc DL = Op.getDebugLoc();
EVT PtrVT = getPointerTy();
BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
Reloc::Model RelocM = getTargetMachine().getRelocationModel();
SDValue CPAddr;
if (RelocM == Reloc::Static) {
@ -1348,7 +1348,7 @@ ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
SDValue
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
SelectionDAG &DAG) {
GlobalValue *GV = GA->getGlobal();
const GlobalValue *GV = GA->getGlobal();
DebugLoc dl = GA->getDebugLoc();
SDValue Offset;
SDValue Chain = DAG.getEntryNode();
@ -1411,7 +1411,7 @@ SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
SelectionDAG &DAG) {
EVT PtrVT = getPointerTy();
DebugLoc dl = Op.getDebugLoc();
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
Reloc::Model RelocM = getTargetMachine().getRelocationModel();
if (RelocM == Reloc::PIC_) {
bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
@ -1454,7 +1454,7 @@ SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
unsigned ARMPCLabelIndex = 0;
EVT PtrVT = getPointerTy();
DebugLoc dl = Op.getDebugLoc();
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
Reloc::Model RelocM = getTargetMachine().getRelocationModel();
SDValue CPAddr;
if (RelocM == Reloc::Static)
@ -1850,7 +1850,7 @@ static bool isFloatingPointZero(SDValue Op) {
if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
SDValue WrapperOp = Op.getOperand(1).getOperand(0);
if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
return CFP->getValueAPF().isPosZero();
}
}
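
The dyn_cast in the zero check above propagates constness; as a standalone sketch (helper name invented):

#include "llvm/Constants.h"

using namespace llvm;

// A const Constant* can only be matched into a const ConstantFP*; the
// APFloat query is const, so nothing more is needed.
static bool isPositiveFPZero(const Constant *C) {
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return CFP->getValueAPF().isPosZero();
  return false;
}
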


@ -116,7 +116,8 @@ ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &FS,
/// GVIsIndirectSymbol - true if the GV will be accessed via an indirect symbol.
bool
ARMSubtarget::GVIsIndirectSymbol(GlobalValue *GV, Reloc::Model RelocM) const {
ARMSubtarget::GVIsIndirectSymbol(const GlobalValue *GV,
Reloc::Model RelocM) const {
if (RelocM == Reloc::Static)
return false;


@ -160,7 +160,7 @@ protected:
/// GVIsIndirectSymbol - true if the GV will be accessed via an indirect
/// symbol.
bool GVIsIndirectSymbol(GlobalValue *GV, Reloc::Model RelocM) const;
bool GVIsIndirectSymbol(const GlobalValue *GV, Reloc::Model RelocM) const;
};
} // End llvm namespace


@ -239,7 +239,7 @@ namespace {
} else if (ACPV->isBlockAddress()) {
O << *GetBlockAddressSymbol(ACPV->getBlockAddress());
} else if (ACPV->isGlobalValue()) {
GlobalValue *GV = ACPV->getGV();
const GlobalValue *GV = ACPV->getGV();
bool isIndirect = Subtarget->isTargetDarwin() &&
Subtarget->GVIsIndirectSymbol(GV, TM.getRelocationModel());
if (!isIndirect)
@ -352,7 +352,7 @@ void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
return;
case MachineOperand::MO_GlobalAddress: {
bool isCallOp = Modifier && !strcmp(Modifier, "call");
GlobalValue *GV = MO.getGlobal();
const GlobalValue *GV = MO.getGlobal();
if ((Modifier && strcmp(Modifier, "lo16") == 0) ||
(TF & ARMII::MO_LO16))


@ -56,7 +56,7 @@ void Thumb1RegisterInfo::emitLoadConstPool(MachineBasicBlock &MBB,
unsigned PredReg) const {
MachineFunction &MF = *MBB.getParent();
MachineConstantPool *ConstantPool = MF.getConstantPool();
Constant *C = ConstantInt::get(
const Constant *C = ConstantInt::get(
Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);


@ -52,7 +52,7 @@ void Thumb2RegisterInfo::emitLoadConstPool(MachineBasicBlock &MBB,
unsigned PredReg) const {
MachineFunction &MF = *MBB.getParent();
MachineConstantPool *ConstantPool = MF.getConstantPool();
Constant *C = ConstantInt::get(
const Constant *C = ConstantInt::get(
Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);


@ -192,10 +192,13 @@ unsigned AlphaCodeEmitter::getMachineOpValue(const MachineInstr &MI,
llvm_unreachable("unknown relocatable instruction");
}
if (MO.isGlobal())
MCE.addRelocation(MachineRelocation::getGV(MCE.getCurrentPCOffset(),
Reloc, MO.getGlobal(), Offset,
isa<Function>(MO.getGlobal()),
useGOT));
MCE.addRelocation(MachineRelocation::getGV(
MCE.getCurrentPCOffset(),
Reloc,
const_cast<GlobalValue *>(MO.getGlobal()),
Offset,
isa<Function>(MO.getGlobal()),
useGOT));
else if (MO.isSymbol())
MCE.addRelocation(MachineRelocation::getExtSym(MCE.getCurrentPCOffset(),
Reloc, MO.getSymbolName(),


@ -624,7 +624,7 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
}
case ISD::ConstantPool: {
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
Constant *C = CP->getConstVal();
const Constant *C = CP->getConstVal();
SDValue CPI = DAG.getTargetConstantPool(C, MVT::i64, CP->getAlignment());
// FIXME there isn't really any debug info here
@ -637,7 +637,7 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
llvm_unreachable("TLS not implemented for Alpha.");
case ISD::GlobalAddress: {
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
GlobalValue *GV = GSDN->getGlobal();
const GlobalValue *GV = GSDN->getGlobal();
SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i64, GSDN->getOffset());
// FIXME there isn't really any debug info here


@ -141,7 +141,7 @@ MVT::SimpleValueType BlackfinTargetLowering::getSetCCResultType(EVT VT) const {
SDValue BlackfinTargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) {
DebugLoc DL = Op.getDebugLoc();
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
Op = DAG.getTargetGlobalAddress(GV, MVT::i32);
return DAG.getNode(BFISD::Wrapper, DL, MVT::i32, Op);


@ -307,7 +307,7 @@ void SPUAsmPrinter::printOp(const MachineOperand &MO, raw_ostream &O) {
// External or weakly linked global variables need non-lazily-resolved
// stubs
if (TM.getRelocationModel() != Reloc::Static) {
GlobalValue *GV = MO.getGlobal();
const GlobalValue *GV = MO.getGlobal();
if (((GV->isDeclaration() || GV->hasWeakLinkage() ||
GV->hasLinkOnceLinkage() || GV->hasCommonLinkage()))) {
O << *GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");


@ -306,7 +306,7 @@ namespace {
CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue()));
}
Constant *CP = ConstantVector::get(CV);
const Constant *CP = ConstantVector::get(CV);
SDValue CPIdx = CurDAG->getConstantPool(CP, SPUtli.getPointerTy());
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
SDValue CGPoolOffset =
@ -454,7 +454,7 @@ SPUDAGToDAGISel::SelectAFormAddr(SDNode *Op, SDValue N, SDValue &Base,
case ISD::TargetGlobalAddress: {
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op0);
GlobalValue *GV = GSDN->getGlobal();
const GlobalValue *GV = GSDN->getGlobal();
if (GV->getAlignment() == 16) {
Base = Op0;
Index = Zero;


@ -893,7 +893,7 @@ static SDValue
LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
EVT PtrVT = Op.getValueType();
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
Constant *C = CP->getConstVal();
const Constant *C = CP->getConstVal();
SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
SDValue Zero = DAG.getConstant(0, PtrVT);
const TargetMachine &TM = DAG.getTarget();
@ -951,7 +951,7 @@ static SDValue
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
EVT PtrVT = Op.getValueType();
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
GlobalValue *GV = GSDN->getGlobal();
const GlobalValue *GV = GSDN->getGlobal();
SDValue GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
const TargetMachine &TM = DAG.getTarget();
SDValue Zero = DAG.getConstant(0, PtrVT);
@ -1242,7 +1242,7 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
// node so that legalize doesn't hack it.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
GlobalValue *GV = G->getGlobal();
const GlobalValue *GV = G->getGlobal();
EVT CalleeVT = Callee.getValueType();
SDValue Zero = DAG.getConstant(0, PtrVT);
SDValue GA = DAG.getTargetGlobalAddress(GV, CalleeVT);


@ -412,7 +412,7 @@ SDValue MBlazeTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) {
// FIXME there isn't actually debug info here
DebugLoc dl = Op.getDebugLoc();
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
return DAG.getNode(MBlazeISD::Wrap, dl, MVT::i32, GA);
@ -446,7 +446,7 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
SDValue ResNode;
EVT PtrVT = Op.getValueType();
ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
Constant *C = N->getConstVal();
const Constant *C = N->getConstVal();
SDValue Zero = DAG.getConstant(0, PtrVT);
DebugLoc dl = Op.getDebugLoc();


@ -45,9 +45,9 @@ namespace {
} Base;
int16_t Disp;
GlobalValue *GV;
Constant *CP;
BlockAddress *BlockAddr;
const GlobalValue *GV;
const Constant *CP;
const BlockAddress *BlockAddr;
const char *ES;
int JT;
unsigned Align; // CP alignment.


@ -484,7 +484,7 @@ LowerSELECT(SDValue Op, SelectionDAG &DAG)
SDValue MipsTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) {
// FIXME there isn't actually debug info here
DebugLoc dl = Op.getDebugLoc();
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
SDVTList VTs = DAG.getVTList(MVT::i32);
@ -564,7 +564,7 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG)
{
SDValue ResNode;
ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
Constant *C = N->getConstVal();
const Constant *C = N->getConstVal();
// FIXME there isn't actually debug info here
DebugLoc dl = Op.getDebugLoc();


@ -1409,7 +1409,7 @@ PIC16TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (IsDirectCall) {
// Considering the GlobalAddressNode case here.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
GlobalValue *GV = G->getGlobal();
const GlobalValue *GV = G->getGlobal();
Callee = DAG.getTargetGlobalAddress(GV, MVT::i8);
Name = G->getGlobal()->getName();
} else {// Considering the ExternalSymbol case here


@ -172,7 +172,7 @@ void PIC16Cloner::CloneAutos(Function *F) {
VarName = I->getName().str();
if (PAN::isLocalToFunc(FnName, VarName)) {
// Auto variable for current function found. Clone it.
GlobalVariable *GV = I;
const GlobalVariable *GV = I;
const Type *InitTy = GV->getInitializer()->getType();
GlobalVariable *ClonedGV =


@ -134,7 +134,7 @@ void PIC16TargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &tm){
const MCSection *
PIC16TargetObjectFile::allocateUDATA(const GlobalVariable *GV) const {
assert(GV->hasInitializer() && "This global doesn't need space");
Constant *C = GV->getInitializer();
const Constant *C = GV->getInitializer();
assert(C->isNullValue() && "Unitialized globals has non-zero initializer");
// Find how much space this global needs.
@@ -169,7 +169,7 @@ PIC16TargetObjectFile::allocateUDATA(const GlobalVariable *GV) const {
const MCSection *
PIC16TargetObjectFile::allocateIDATA(const GlobalVariable *GV) const{
assert(GV->hasInitializer() && "This global doesn't need space");
Constant *C = GV->getInitializer();
const Constant *C = GV->getInitializer();
assert(!C->isNullValue() && "initialized globals has zero initializer");
assert(GV->getType()->getAddressSpace() == PIC16ISD::RAM_SPACE &&
"can allocate initialized RAM data only");

@@ -200,7 +200,7 @@ namespace {
const MachineOperand &MO = MI->getOperand(OpNo);
if (TM.getRelocationModel() != Reloc::Static) {
if (MO.getType() == MachineOperand::MO_GlobalAddress) {
GlobalValue *GV = MO.getGlobal();
const GlobalValue *GV = MO.getGlobal();
if (GV->isDeclaration() || GV->isWeakForLinker()) {
// Dynamically-resolved functions need a stub for the function.
MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$stub");
@@ -405,7 +405,7 @@ void PPCAsmPrinter::printOp(const MachineOperand &MO, raw_ostream &O) {
}
case MachineOperand::MO_GlobalAddress: {
// Computing the address of a global symbol, not calling it.
GlobalValue *GV = MO.getGlobal();
const GlobalValue *GV = MO.getGlobal();
MCSymbol *SymToPrint;
// External or weakly linked global variables need non-lazily-resolved stubs
@@ -794,8 +794,8 @@ bool PPCDarwinAsmPrinter::doFinalization(Module &M) {
if (MAI->doesSupportExceptionHandling() && MMI) {
// Add the (possibly multiple) personalities to the set of global values.
// Only referenced functions get into the Personalities list.
const std::vector<Function *> &Personalities = MMI->getPersonalities();
for (std::vector<Function *>::const_iterator I = Personalities.begin(),
const std::vector<const Function*> &Personalities = MMI->getPersonalities();
for (std::vector<const Function*>::const_iterator I = Personalities.begin(),
E = Personalities.end(); I != E; ++I) {
if (*I) {
MCSymbol *NLPSym = GetSymbolWithGlobalValueBase(*I, "$non_lazy_ptr");

@@ -202,7 +202,7 @@ unsigned PPCCodeEmitter::getMachineOpValue(const MachineInstr &MI,
MachineRelocation R;
if (MO.isGlobal()) {
R = MachineRelocation::getGV(MCE.getCurrentPCOffset(), Reloc,
MO.getGlobal(), 0,
const_cast<GlobalValue *>(MO.getGlobal()), 0,
isa<Function>(MO.getGlobal()));
} else if (MO.isSymbol()) {
R = MachineRelocation::getExtSym(MCE.getCurrentPCOffset(),
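
The PPCCodeEmitter hunk above shows the one place where const cannot simply be threaded through: MO.getGlobal() now returns a const GlobalValue*, but MachineRelocation's factory has not been const-qualified, so a const_cast is confined to that call site. A minimal standalone sketch of the same boundary pattern, using hypothetical stand-in types rather than LLVM's own classes:

// Hypothetical stand-ins; only the const_cast-at-the-boundary idea is real here.
#include <iostream>

struct GlobalSym { const char *Name; };            // stands in for GlobalValue

struct Relocation {                                // stands in for MachineRelocation
  GlobalSym *Target;                               // legacy API keeps a mutable pointer
  static Relocation getGV(GlobalSym *GV) { return Relocation{GV}; }
};

// Const-qualified accessor, mirroring the new getter style in the diff.
const GlobalSym *getGlobal(const GlobalSym &S) { return &S; }

int main() {
  GlobalSym G{"foo"};
  const GlobalSym *GV = getGlobal(G);              // callers only see a const view
  // The cast is localized to the call into the not-yet-constified factory.
  Relocation R = Relocation::getGV(const_cast<GlobalSym *>(GV));
  std::cout << R.Target->Name << "\n";
  return 0;
}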

@@ -476,7 +476,7 @@ static bool isFloatingPointZero(SDValue Op) {
else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
// Maybe this has already been legalized into the constant pool?
if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
return CFP->getValueAPF().isZero();
}
return false;
@@ -1098,7 +1098,7 @@ SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
SelectionDAG &DAG) {
EVT PtrVT = Op.getValueType();
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
Constant *C = CP->getConstVal();
const Constant *C = CP->getConstVal();
SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
SDValue Zero = DAG.getConstant(0, PtrVT);
// FIXME there isn't really any debug info here
@@ -1172,7 +1172,7 @@ SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) {
EVT PtrVT = Op.getValueType();
DebugLoc DL = Op.getDebugLoc();
BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
SDValue TgtBA = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true);
SDValue Zero = DAG.getConstant(0, PtrVT);
SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, TgtBA, Zero);
@@ -1202,7 +1202,7 @@ SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) {
EVT PtrVT = Op.getValueType();
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
GlobalValue *GV = GSDN->getGlobal();
const GlobalValue *GV = GSDN->getGlobal();
SDValue GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset());
SDValue Zero = DAG.getConstant(0, PtrVT);
// FIXME there isn't really any debug info here

@@ -753,7 +753,7 @@ static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) {
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
// FIXME there isn't really any debug info here
DebugLoc dl = Op.getDebugLoc();
SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
@@ -777,7 +777,7 @@ SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
// FIXME there isn't really any debug info here
DebugLoc dl = Op.getDebugLoc();
Constant *C = N->getConstVal();
const Constant *C = N->getConstVal();
SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment());
SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, CP);
SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, CP);

@@ -716,7 +716,7 @@ SDValue SystemZTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
SDValue SystemZTargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
bool IsPic = getTargetMachine().getRelocationModel() == Reloc::PIC_;

@@ -44,7 +44,7 @@ struct SystemZAddressMode {
unsigned IndexReg;
int32_t Disp;
GlobalValue *GV;
const GlobalValue *GV;
SystemZAddressMode() : BaseType(RegBase), IndexReg(0), Disp(0) {
Base.Reg = 0;

@@ -79,7 +79,7 @@ namespace {
private:
void emitPCRelativeBlockAddress(MachineBasicBlock *MBB);
void emitGlobalAddress(GlobalValue *GV, unsigned Reloc,
void emitGlobalAddress(const GlobalValue *GV, unsigned Reloc,
intptr_t Disp = 0, intptr_t PCAdj = 0,
bool Indirect = false);
void emitExternalSymbolAddress(const char *ES, unsigned Reloc);
@@ -163,7 +163,8 @@ void Emitter<CodeEmitter>::emitPCRelativeBlockAddress(MachineBasicBlock *MBB) {
/// this is part of a "take the address of a global" instruction.
///
template<class CodeEmitter>
void Emitter<CodeEmitter>::emitGlobalAddress(GlobalValue *GV, unsigned Reloc,
void Emitter<CodeEmitter>::emitGlobalAddress(const GlobalValue *GV,
unsigned Reloc,
intptr_t Disp /* = 0 */,
intptr_t PCAdj /* = 0 */,
bool Indirect /* = false */) {
@@ -174,9 +175,10 @@ void Emitter<CodeEmitter>::emitGlobalAddress(GlobalValue *GV, unsigned Reloc,
RelocCST = PCAdj;
MachineRelocation MR = Indirect
? MachineRelocation::getIndirectSymbol(MCE.getCurrentPCOffset(), Reloc,
GV, RelocCST, false)
const_cast<GlobalValue *>(GV),
RelocCST, false)
: MachineRelocation::getGV(MCE.getCurrentPCOffset(), Reloc,
GV, RelocCST, false);
const_cast<GlobalValue *>(GV), RelocCST, false);
MCE.addRelocation(MR);
// The relocated value will be added to the displacement
if (Reloc == X86::reloc_absolute_dword)

@@ -72,16 +72,16 @@ public:
X86ScalarSSEf32 = Subtarget->hasSSE1();
}
virtual bool TargetSelectInstruction(Instruction *I);
virtual bool TargetSelectInstruction(const Instruction *I);
#include "X86GenFastISel.inc"
private:
bool X86FastEmitCompare(Value *LHS, Value *RHS, EVT VT);
bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT);
bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, unsigned &RR);
bool X86FastEmitStore(EVT VT, Value *Val,
bool X86FastEmitStore(EVT VT, const Value *Val,
const X86AddressMode &AM);
bool X86FastEmitStore(EVT VT, unsigned Val,
const X86AddressMode &AM);
@@ -89,32 +89,32 @@ private:
bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
unsigned &ResultReg);
bool X86SelectAddress(Value *V, X86AddressMode &AM);
bool X86SelectCallAddress(Value *V, X86AddressMode &AM);
bool X86SelectAddress(const Value *V, X86AddressMode &AM);
bool X86SelectCallAddress(const Value *V, X86AddressMode &AM);
bool X86SelectLoad(Instruction *I);
bool X86SelectLoad(const Instruction *I);
bool X86SelectStore(Instruction *I);
bool X86SelectStore(const Instruction *I);
bool X86SelectCmp(Instruction *I);
bool X86SelectCmp(const Instruction *I);
bool X86SelectZExt(Instruction *I);
bool X86SelectZExt(const Instruction *I);
bool X86SelectBranch(Instruction *I);
bool X86SelectBranch(const Instruction *I);
bool X86SelectShift(Instruction *I);
bool X86SelectShift(const Instruction *I);
bool X86SelectSelect(Instruction *I);
bool X86SelectSelect(const Instruction *I);
bool X86SelectTrunc(Instruction *I);
bool X86SelectTrunc(const Instruction *I);
bool X86SelectFPExt(Instruction *I);
bool X86SelectFPTrunc(Instruction *I);
bool X86SelectFPExt(const Instruction *I);
bool X86SelectFPTrunc(const Instruction *I);
bool X86SelectExtractValue(Instruction *I);
bool X86SelectExtractValue(const Instruction *I);
bool X86VisitIntrinsicCall(IntrinsicInst &I);
bool X86SelectCall(Instruction *I);
bool X86VisitIntrinsicCall(const IntrinsicInst &I);
bool X86SelectCall(const Instruction *I);
CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isTailCall = false);
@@ -125,9 +125,9 @@ private:
return static_cast<const X86TargetMachine *>(&TM);
}
unsigned TargetMaterializeConstant(Constant *C);
unsigned TargetMaterializeConstant(const Constant *C);
unsigned TargetMaterializeAlloca(AllocaInst *C);
unsigned TargetMaterializeAlloca(const AllocaInst *C);
/// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
/// computed in an SSE register, not on the X87 floating point stack.
@@ -280,14 +280,14 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
return true;
}
bool X86FastISel::X86FastEmitStore(EVT VT, Value *Val,
bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
const X86AddressMode &AM) {
// Handle 'null' like i32/i64 0.
if (isa<ConstantPointerNull>(Val))
Val = Constant::getNullValue(TD.getIntPtrType(Val->getContext()));
// If this is a store of a simple constant, fold the constant into the store.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
unsigned Opc = 0;
bool Signed = true;
switch (VT.getSimpleVT().SimpleTy) {
@@ -335,13 +335,13 @@ bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
/// X86SelectAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
User *U = NULL;
bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
const User *U = NULL;
unsigned Opcode = Instruction::UserOp1;
if (Instruction *I = dyn_cast<Instruction>(V)) {
if (const Instruction *I = dyn_cast<Instruction>(V)) {
Opcode = I->getOpcode();
U = I;
} else if (ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
} else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
Opcode = C->getOpcode();
U = C;
}
@@ -378,7 +378,7 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
case Instruction::Add: {
// Adds of constants are common and easy enough.
if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();
// They have to fit in the 32-bit signed displacement field though.
if (isInt<32>(Disp)) {
@@ -399,16 +399,16 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
gep_type_iterator GTI = gep_type_begin(U);
// Iterate through the indices, folding what we can. Constants can be
// folded, and one dynamic index can be handled, if the scale is supported.
for (User::op_iterator i = U->op_begin() + 1, e = U->op_end();
for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
i != e; ++i, ++GTI) {
Value *Op = *i;
const Value *Op = *i;
if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
const StructLayout *SL = TD.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
Disp += SL->getElementOffset(Idx);
} else {
uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
// Constant-offset addressing.
Disp += CI->getSExtValue() * S;
} else if (IndexReg == 0 &&
@@ -446,7 +446,7 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
}
// Handle constant address.
if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
// Can't handle alternate code models yet.
if (TM.getCodeModel() != CodeModel::Small)
return false;
@@ -457,7 +457,7 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
return false;
// Can't handle TLS yet.
if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
if (GVar->isThreadLocal())
return false;
@@ -544,13 +544,13 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
/// X86SelectCallAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectCallAddress(Value *V, X86AddressMode &AM) {
User *U = NULL;
bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
const User *U = NULL;
unsigned Opcode = Instruction::UserOp1;
if (Instruction *I = dyn_cast<Instruction>(V)) {
if (const Instruction *I = dyn_cast<Instruction>(V)) {
Opcode = I->getOpcode();
U = I;
} else if (ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
} else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
Opcode = C->getOpcode();
U = C;
}
@@ -575,7 +575,7 @@ bool X86FastISel::X86SelectCallAddress(Value *V, X86AddressMode &AM) {
}
// Handle constant address.
if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
// Can't handle alternate code models yet.
if (TM.getCodeModel() != CodeModel::Small)
return false;
@@ -586,7 +586,7 @@ bool X86FastISel::X86SelectCallAddress(Value *V, X86AddressMode &AM) {
return false;
// Can't handle TLS or DLLImport.
if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
if (GVar->isThreadLocal() || GVar->hasDLLImportLinkage())
return false;
@@ -627,7 +627,7 @@ bool X86FastISel::X86SelectCallAddress(Value *V, X86AddressMode &AM) {
/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(Instruction* I) {
bool X86FastISel::X86SelectStore(const Instruction *I) {
EVT VT;
if (!isTypeLegal(I->getOperand(0)->getType(), VT, /*AllowI1=*/true))
return false;
@@ -641,7 +641,7 @@ bool X86FastISel::X86SelectStore(Instruction* I) {
/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(Instruction *I) {
bool X86FastISel::X86SelectLoad(const Instruction *I) {
EVT VT;
if (!isTypeLegal(I->getType(), VT, /*AllowI1=*/true))
return false;
@@ -673,7 +673,7 @@ static unsigned X86ChooseCmpOpcode(EVT VT) {
/// X86ChooseCmpImmediateOpcode - If we have a comparison with RHS as the RHS
/// of the comparison, return an opcode that works for the compare (e.g.
/// CMP32ri) otherwise return 0.
static unsigned X86ChooseCmpImmediateOpcode(EVT VT, ConstantInt *RHSC) {
static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
switch (VT.getSimpleVT().SimpleTy) {
// Otherwise, we can't fold the immediate into this comparison.
default: return 0;
@@ -689,7 +689,8 @@ static unsigned X86ChooseCmpImmediateOpcode(EVT VT, ConstantInt *RHSC) {
}
}
bool X86FastISel::X86FastEmitCompare(Value *Op0, Value *Op1, EVT VT) {
bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
EVT VT) {
unsigned Op0Reg = getRegForValue(Op0);
if (Op0Reg == 0) return false;
@@ -700,7 +701,7 @@ bool X86FastISel::X86FastEmitCompare(Value *Op0, Value *Op1, EVT VT) {
// We have two options: compare with register or immediate. If the RHS of
// the compare is an immediate that we can fold into this compare, use
// CMPri, otherwise use CMPrr.
if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
BuildMI(MBB, DL, TII.get(CompareImmOpc)).addReg(Op0Reg)
.addImm(Op1C->getSExtValue());
@@ -718,8 +719,8 @@ bool X86FastISel::X86FastEmitCompare(Value *Op0, Value *Op1, EVT VT) {
return true;
}
bool X86FastISel::X86SelectCmp(Instruction *I) {
CmpInst *CI = cast<CmpInst>(I);
bool X86FastISel::X86SelectCmp(const Instruction *I) {
const CmpInst *CI = cast<CmpInst>(I);
EVT VT;
if (!isTypeLegal(I->getOperand(0)->getType(), VT))
@@ -781,7 +782,7 @@ bool X86FastISel::X86SelectCmp(Instruction *I) {
return false;
}
Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
const Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
if (SwapArgs)
std::swap(Op0, Op1);
@@ -794,7 +795,7 @@ bool X86FastISel::X86SelectCmp(Instruction *I) {
return true;
}
bool X86FastISel::X86SelectZExt(Instruction *I) {
bool X86FastISel::X86SelectZExt(const Instruction *I) {
// Handle zero-extension from i1 to i8, which is common.
if (I->getType()->isIntegerTy(8) &&
I->getOperand(0)->getType()->isIntegerTy(1)) {
@@ -811,15 +812,15 @@ bool X86FastISel::X86SelectZExt(Instruction *I) {
}
bool X86FastISel::X86SelectBranch(Instruction *I) {
bool X86FastISel::X86SelectBranch(const Instruction *I) {
// Unconditional branches are selected by tablegen-generated code.
// Handle a conditional branch.
BranchInst *BI = cast<BranchInst>(I);
const BranchInst *BI = cast<BranchInst>(I);
MachineBasicBlock *TrueMBB = MBBMap[BI->getSuccessor(0)];
MachineBasicBlock *FalseMBB = MBBMap[BI->getSuccessor(1)];
// Fold the common case of a conditional branch with a comparison.
if (CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
if (CI->hasOneUse()) {
EVT VT = TLI.getValueType(CI->getOperand(0)->getType());
@@ -866,7 +867,7 @@ bool X86FastISel::X86SelectBranch(Instruction *I) {
return false;
}
Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
const Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
if (SwapArgs)
std::swap(Op0, Op1);
@@ -901,7 +902,8 @@ bool X86FastISel::X86SelectBranch(Instruction *I) {
// looking for the SETO/SETB instruction. If an instruction modifies the
// EFLAGS register before we reach the SETO/SETB instruction, then we can't
// convert the branch into a JO/JB instruction.
if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(EI->getAggregateOperand())){
if (const IntrinsicInst *CI =
dyn_cast<IntrinsicInst>(EI->getAggregateOperand())){
if (CI->getIntrinsicID() == Intrinsic::sadd_with_overflow ||
CI->getIntrinsicID() == Intrinsic::uadd_with_overflow) {
const MachineInstr *SetMI = 0;
@@ -956,7 +958,7 @@ bool X86FastISel::X86SelectBranch(Instruction *I) {
return true;
}
bool X86FastISel::X86SelectShift(Instruction *I) {
bool X86FastISel::X86SelectShift(const Instruction *I) {
unsigned CReg = 0, OpReg = 0, OpImm = 0;
const TargetRegisterClass *RC = NULL;
if (I->getType()->isIntegerTy(8)) {
@@ -1007,7 +1009,7 @@ bool X86FastISel::X86SelectShift(Instruction *I) {
if (Op0Reg == 0) return false;
// Fold immediate in shl(x,3).
if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
unsigned ResultReg = createResultReg(RC);
BuildMI(MBB, DL, TII.get(OpImm),
ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff);
@@ -1032,7 +1034,7 @@ bool X86FastISel::X86SelectShift(Instruction *I) {
return true;
}
bool X86FastISel::X86SelectSelect(Instruction *I) {
bool X86FastISel::X86SelectSelect(const Instruction *I) {
EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
return false;
@@ -1066,11 +1068,11 @@ bool X86FastISel::X86SelectSelect(Instruction *I) {
return true;
}
bool X86FastISel::X86SelectFPExt(Instruction *I) {
bool X86FastISel::X86SelectFPExt(const Instruction *I) {
// fpext from float to double.
if (Subtarget->hasSSE2() &&
I->getType()->isDoubleTy()) {
Value *V = I->getOperand(0);
const Value *V = I->getOperand(0);
if (V->getType()->isFloatTy()) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
@@ -1084,10 +1086,10 @@ bool X86FastISel::X86SelectFPExt(Instruction *I) {
return false;
}
bool X86FastISel::X86SelectFPTrunc(Instruction *I) {
bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
if (Subtarget->hasSSE2()) {
if (I->getType()->isFloatTy()) {
Value *V = I->getOperand(0);
const Value *V = I->getOperand(0);
if (V->getType()->isDoubleTy()) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
@@ -1102,7 +1104,7 @@ bool X86FastISel::X86SelectFPTrunc(Instruction *I) {
return false;
}
bool X86FastISel::X86SelectTrunc(Instruction *I) {
bool X86FastISel::X86SelectTrunc(const Instruction *I) {
if (Subtarget->is64Bit())
// All other cases should be handled by the tblgen generated code.
return false;
@@ -1139,11 +1141,11 @@ bool X86FastISel::X86SelectTrunc(Instruction *I) {
return true;
}
bool X86FastISel::X86SelectExtractValue(Instruction *I) {
ExtractValueInst *EI = cast<ExtractValueInst>(I);
Value *Agg = EI->getAggregateOperand();
bool X86FastISel::X86SelectExtractValue(const Instruction *I) {
const ExtractValueInst *EI = cast<ExtractValueInst>(I);
const Value *Agg = EI->getAggregateOperand();
if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(Agg)) {
if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(Agg)) {
switch (CI->getIntrinsicID()) {
default: break;
case Intrinsic::sadd_with_overflow:
@@ -1160,7 +1162,7 @@ bool X86FastISel::X86SelectExtractValue(Instruction *I) {
return false;
}
bool X86FastISel::X86VisitIntrinsicCall(IntrinsicInst &I) {
bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
// FIXME: Handle more intrinsics.
switch (I.getIntrinsicID()) {
default: return false;
@@ -1168,8 +1170,8 @@ bool X86FastISel::X86VisitIntrinsicCall(IntrinsicInst &I) {
// Emit code inline code to store the stack guard onto the stack.
EVT PtrTy = TLI.getPointerTy();
Value *Op1 = I.getOperand(1); // The guard's value.
AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
const Value *Op1 = I.getOperand(1); // The guard's value.
const AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
// Grab the frame index.
X86AddressMode AM;
@@ -1204,7 +1206,7 @@ bool X86FastISel::X86VisitIntrinsicCall(IntrinsicInst &I) {
return true;
}
case Intrinsic::dbg_declare: {
DbgDeclareInst *DI = cast<DbgDeclareInst>(&I);
const DbgDeclareInst *DI = cast<DbgDeclareInst>(&I);
X86AddressMode AM;
assert(DI->getAddress() && "Null address should be checked earlier!");
if (!X86SelectAddress(DI->getAddress(), AM))
@@ -1235,8 +1237,8 @@ bool X86FastISel::X86VisitIntrinsicCall(IntrinsicInst &I) {
if (!isTypeLegal(RetTy, VT))
return false;
Value *Op1 = I.getOperand(1);
Value *Op2 = I.getOperand(2);
const Value *Op1 = I.getOperand(1);
const Value *Op2 = I.getOperand(2);
unsigned Reg1 = getRegForValue(Op1);
unsigned Reg2 = getRegForValue(Op2);
@@ -1277,20 +1279,20 @@ bool X86FastISel::X86VisitIntrinsicCall(IntrinsicInst &I) {
}
}
bool X86FastISel::X86SelectCall(Instruction *I) {
CallInst *CI = cast<CallInst>(I);
Value *Callee = I->getOperand(0);
bool X86FastISel::X86SelectCall(const Instruction *I) {
const CallInst *CI = cast<CallInst>(I);
const Value *Callee = I->getOperand(0);
// Can't handle inline asm yet.
if (isa<InlineAsm>(Callee))
return false;
// Handle intrinsic calls.
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI))
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI))
return X86VisitIntrinsicCall(*II);
// Handle only C and fastcc calling conventions for now.
CallSite CS(CI);
ImmutableCallSite CS(CI);
CallingConv::ID CC = CS.getCallingConv();
if (CC != CallingConv::C &&
CC != CallingConv::Fast &&
@@ -1322,7 +1324,7 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
if (!X86SelectCallAddress(Callee, CalleeAM))
return false;
unsigned CalleeOp = 0;
GlobalValue *GV = 0;
const GlobalValue *GV = 0;
if (CalleeAM.GV != 0) {
GV = CalleeAM.GV;
} else if (CalleeAM.Base.Reg != 0) {
@@ -1338,7 +1340,7 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
}
// Deal with call operands first.
SmallVector<Value*, 8> ArgVals;
SmallVector<const Value *, 8> ArgVals;
SmallVector<unsigned, 8> Args;
SmallVector<EVT, 8> ArgVTs;
SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
@@ -1346,7 +1348,7 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
ArgVals.reserve(CS.arg_size());
ArgVTs.reserve(CS.arg_size());
ArgFlags.reserve(CS.arg_size());
for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
i != e; ++i) {
unsigned Arg = getRegForValue(*i);
if (Arg == 0)
@@ -1454,7 +1456,7 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
X86AddressMode AM;
AM.Base.Reg = StackPtr;
AM.Disp = LocMemOffset;
Value *ArgVal = ArgVals[VA.getValNo()];
const Value *ArgVal = ArgVals[VA.getValNo()];
// If this is a really simple value, emit this with the Value* version of
// X86FastEmitStore. If it isn't simple, we don't want to do this, as it
@@ -1585,7 +1587,7 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
bool
X86FastISel::TargetSelectInstruction(Instruction *I) {
X86FastISel::TargetSelectInstruction(const Instruction *I) {
switch (I->getOpcode()) {
default: break;
case Instruction::Load:
@@ -1633,7 +1635,7 @@ X86FastISel::TargetSelectInstruction(Instruction *I) {
return false;
}
unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
EVT VT;
if (!isTypeLegal(C->getType(), VT))
return false;
@@ -1728,7 +1730,7 @@ unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
return ResultReg;
}
unsigned X86FastISel::TargetMaterializeAlloca(AllocaInst *C) {
unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
// Fail on dynamic allocas. At this point, getRegForValue has already
// checked its CSE maps, so if we're here trying to handle a dynamic
// alloca, we're not going to succeed. X86SelectAddress has a
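
A side note on the X86SelectAddress and X86SelectCallAddress hunks above: once the incoming Value* is const, the pointers produced by dyn_cast and every helper they are passed to must be const as well, which is why the whole chain of selector signatures changes together. A small self-contained sketch of that propagation, using a toy hierarchy and dynamic_cast in place of LLVM's dyn_cast:

// Toy types, not LLVM's; illustrates const propagation through casts only.
#include <iostream>

struct Value       { virtual ~Value() = default; };
struct Instruction : Value { int Opcode = 1; };
struct ConstantInt : Value { long Val = 0; };

// Helpers only inspect their operands, so they take const pointers.
bool selectAdd(const Instruction *I) { return I->Opcode == 1; }

bool selectOperator(const Value *V) {
  // Casting a const Value* yields a const pointer, so the helpers above
  // must accept const too, the same ripple seen in the diff.
  if (const Instruction *I = dynamic_cast<const Instruction *>(V))
    return selectAdd(I);
  if (const ConstantInt *CI = dynamic_cast<const ConstantInt *>(V))
    return CI->Val == 0;
  return false;
}

int main() {
  Instruction Add;
  std::cout << std::boolalpha << selectOperator(&Add) << "\n";  // prints true
  return 0;
}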

@@ -20,7 +20,6 @@
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CFG.h"
@@ -66,9 +65,9 @@ namespace {
SDValue IndexReg;
int32_t Disp;
SDValue Segment;
GlobalValue *GV;
Constant *CP;
BlockAddress *BlockAddr;
const GlobalValue *GV;
const Constant *CP;
const BlockAddress *BlockAddr;
const char *ES;
int JT;
unsigned Align; // CP alignment.

@@ -2067,7 +2067,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// We should use extra load for direct calls to dllimported functions in
// non-JIT mode.
GlobalValue *GV = G->getGlobal();
const GlobalValue *GV = G->getGlobal();
if (!GV->hasDLLImportLinkage()) {
unsigned char OpFlags = 0;
@@ -5149,7 +5149,7 @@ X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) {
unsigned char OpFlags =
Subtarget->ClassifyBlockAddressReference();
CodeModel::Model M = getTargetMachine().getCodeModel();
BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
DebugLoc dl = Op.getDebugLoc();
SDValue Result = DAG.getBlockAddress(BA, getPointerTy(),
/*isTarget=*/true, OpFlags);
@@ -8882,7 +8882,8 @@ void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
bool X86TargetLowering::isGAPlusOffset(SDNode *N,
GlobalValue* &GA, int64_t &Offset) const{
const GlobalValue* &GA,
int64_t &Offset) const {
if (N->getOpcode() == X86ISD::Wrapper) {
if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
@@ -10167,7 +10168,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
return;
}
GlobalValue *GV = GA->getGlobal();
const GlobalValue *GV = GA->getGlobal();
// If we require an extra load to get this address, as in PIC mode, we
// can't accept it.
if (isGlobalStubReference(Subtarget->ClassifyGlobalReference(GV,

@@ -475,7 +475,7 @@ namespace llvm {
unsigned Depth = 0) const;
virtual bool
isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) const;
isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
SDValue getReturnAddressFrameIndex(SelectionDAG &DAG);
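
Why the isGAPlusOffset change touches both the out-of-line definition a few hunks above and the declaration here at the same time: a GlobalValue*& out-parameter cannot bind to a variable of type const GlobalValue*, so the pointee's constness has to change in every declaration of the function together. A minimal sketch with hypothetical names:

// Hypothetical stand-ins; shows only the out-parameter binding constraint.
struct GlobalValue { int X = 0; };

// Old shape: writes a mutable pointer back through the reference.
bool lookupMutable(GlobalValue *&GA) { static GlobalValue G; GA = &G; return true; }

// New shape: the caller's variable is const GlobalValue*, so the parameter must match.
bool lookupConst(const GlobalValue *&GA) { static GlobalValue G; GA = &G; return true; }

int main() {
  const GlobalValue *GV = nullptr;
  // lookupMutable(GV);  // would not compile: GlobalValue*& cannot bind to const GlobalValue*
  return lookupConst(GV) ? 0 : 1;
}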

@@ -49,7 +49,7 @@ struct X86AddressMode {
unsigned Scale;
unsigned IndexReg;
int Disp;
GlobalValue *GV;
const GlobalValue *GV;
unsigned GVOpFlags;
X86AddressMode()

@@ -2629,7 +2629,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
Ty = Type::getDoubleTy(MF.getFunction()->getContext());
else
Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4);
Constant *C = LoadMI->getOpcode() == X86::V_SETALLONES ?
const Constant *C = LoadMI->getOpcode() == X86::V_SETALLONES ?
Constant::getAllOnesValue(Ty) :
Constant::getNullValue(Ty);
unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);

@@ -220,7 +220,7 @@ LowerSELECT_CC(SDValue Op, SelectionDAG &DAG)
}
SDValue XCoreTargetLowering::
getGlobalAddressWrapper(SDValue GA, GlobalValue *GV, SelectionDAG &DAG)
getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV, SelectionDAG &DAG)
{
// FIXME there is no actual debug info here
DebugLoc dl = GA.getDebugLoc();
@@ -243,7 +243,7 @@ getGlobalAddressWrapper(SDValue GA, GlobalValue *GV, SelectionDAG &DAG)
SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG)
{
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
// If it's a debug information descriptor, don't mess with it.
if (DAG.isVerifiedDebugInfoDesc(Op))
@@ -267,7 +267,7 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG)
// FIXME there isn't really debug info here
DebugLoc dl = Op.getDebugLoc();
// transform to label + getid() * size
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
if (!GVar) {
@@ -300,7 +300,7 @@ LowerBlockAddress(SDValue Op, SelectionDAG &DAG)
{
DebugLoc DL = Op.getDebugLoc();
BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
SDValue Result = DAG.getBlockAddress(BA, getPointerTy(), /*isTarget=*/true);
return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, getPointerTy(), Result);

@@ -129,7 +129,7 @@ namespace llvm {
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals);
SDValue getReturnAddressFrameIndex(SelectionDAG &DAG);
SDValue getGlobalAddressWrapper(SDValue GA, GlobalValue *GV,
SDValue getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV,
SelectionDAG &DAG);
// Lower Operand specifics