Fix a constant lowering bug. Now we can do load and store instructions with funky getelementptr embedded in the address operand.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@55975 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Evan Cheng 2008-09-09 01:26:59 +00:00
parent b2dfb89e0e
commit 59fbc80f6b
4 changed files with 75 additions and 31 deletions

View File

@ -80,6 +80,11 @@ public:
/// be assigned the value for the given LLVM value.
unsigned getRegForValue(Value *V);
/// lookUpRegForValue - Look up the value to see if its value is already
/// cached in a register. It may be defined by instructions across blocks or
/// defined locally.
unsigned lookUpRegForValue(Value *V);
virtual ~FastISel();
protected:

View File

@ -40,6 +40,9 @@ unsigned FastISel::getRegForValue(Value *V) {
// Don't cache constant materializations. To do so would require
// tracking what uses they dominate.
Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
} else if (isa<GlobalValue>(V)) {
return TargetMaterializeConstant(dyn_cast<Constant>(V),
MBB->getParent()->getConstantPool());
} else if (isa<ConstantPointerNull>(V)) {
Reg = FastEmit_i(VT, VT, ISD::Constant, 0);
} else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
@ -85,6 +88,16 @@ unsigned FastISel::getRegForValue(Value *V) {
return Reg;
}
/// lookUpRegForValue - Return the register already assigned to V, if any.
///
/// Registers for values defined by Instructions are cached across blocks
/// in ValueMap (SSA already guarantees a def dominates its uses); all other
/// values live in the block-local LocalValueMap. Querying LocalValueMap via
/// operator[] yields 0 (and inserts a default entry) when V has no cached
/// register, which callers treat as "not found".
unsigned FastISel::lookUpRegForValue(Value *V) {
  // Prefer the cross-block cache; otherwise fall back to the local map.
  return ValueMap.count(V) ? ValueMap[V] : LocalValueMap[V];
}
/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previous
/// determined register.

View File

@ -74,7 +74,8 @@ private:
bool X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT, unsigned Src, MVT SrcVT,
unsigned &ResultReg);
bool X86SelectConstAddr(Value *V, unsigned &Op0, bool isCall = false);
bool X86SelectConstAddr(Value *V, unsigned &Op0,
bool isCall = false, bool inReg = false);
bool X86SelectLoad(Instruction *I);
@ -285,7 +286,8 @@ bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT,
/// X86SelectConstAddr - Select and emit code to materialize constant address.
///
bool X86FastISel::X86SelectConstAddr(Value *V, unsigned &Op0, bool isCall) {
bool X86FastISel::X86SelectConstAddr(Value *V, unsigned &Op0,
bool isCall, bool inReg) {
// FIXME: Only GlobalAddress for now.
GlobalValue *GV = dyn_cast<GlobalValue>(V);
if (!GV)
@ -308,7 +310,24 @@ bool X86FastISel::X86SelectConstAddr(Value *V, unsigned &Op0, bool isCall) {
addFullAddress(BuildMI(MBB, TII.get(Opc), Op0), AM);
// Prevent loading GV stub multiple times in same MBB.
LocalValueMap[V] = Op0;
} else if (inReg) {
unsigned Opc = 0;
const TargetRegisterClass *RC = NULL;
if (TLI.getPointerTy() == MVT::i32) {
Opc = X86::LEA32r;
RC = X86::GR32RegisterClass;
} else {
Opc = X86::LEA64r;
RC = X86::GR64RegisterClass;
}
Op0 = createResultReg(RC);
X86AddressMode AM;
AM.GV = GV;
addFullAddress(BuildMI(MBB, TII.get(Opc), Op0), AM);
// Prevent materializing GV address multiple times in same MBB.
LocalValueMap[V] = Op0;
}
return true;
}
@ -323,12 +342,17 @@ bool X86FastISel::X86SelectStore(Instruction* I) {
return false;
Value *V = I->getOperand(1);
unsigned Ptr = getRegForValue(V);
if (Ptr == 0) {
// Handle constant store address.
if (!isa<Constant>(V) || !X86SelectConstAddr(V, Ptr))
// Unhandled operand. Halt "fast" selection and bail.
return false;
unsigned Ptr = lookUpRegForValue(V);
if (!Ptr) {
// Handle constant load address.
// FIXME: If load type is something we can't handle, this can result in
// a dead stub load instruction.
if (!isa<Constant>(V) || !X86SelectConstAddr(V, Ptr)) {
Ptr = getRegForValue(V);
if (Ptr == 0)
// Unhandled operand. Halt "fast" selection and bail.
return false;
}
}
return X86FastEmitStore(VT, Val, Ptr, 0, V);
@ -342,14 +366,17 @@ bool X86FastISel::X86SelectLoad(Instruction *I) {
return false;
Value *V = I->getOperand(0);
unsigned Ptr = getRegForValue(V);
if (Ptr == 0) {
unsigned Ptr = lookUpRegForValue(V);
if (!Ptr) {
// Handle constant load address.
// FIXME: If load type is something we can't handle, this can result in
// a dead stub load instruction.
if (!isa<Constant>(V) || !X86SelectConstAddr(V, Ptr))
// Unhandled operand. Halt "fast" selection and bail.
return false;
if (!isa<Constant>(V) || !X86SelectConstAddr(V, Ptr)) {
Ptr = getRegForValue(V);
if (Ptr == 0)
// Unhandled operand. Halt "fast" selection and bail.
return false;
}
}
unsigned ResultReg = 0;
@ -917,18 +944,8 @@ unsigned X86FastISel::TargetMaterializeConstant(Constant *C,
if (TM.getRelocationModel() == Reloc::PIC_)
return 0;
MVT VT = MVT::getMVT(C->getType(), /*HandleUnknown=*/true);
if (VT == MVT::Other || !VT.isSimple())
// Unhandled type. Halt "fast" selection and bail.
return false;
if (VT == MVT::iPTR)
// Use pointer type.
VT = TLI.getPointerTy();
// We only handle legal types. For example, on x86-32 the instruction
// selector contains all of the 64-bit instructions from x86-64,
// under the assumption that i64 won't be used if the target doesn't
// support it.
if (!TLI.isTypeLegal(VT))
MVT VT;
if (!isTypeLegal(C->getType(), TLI, VT))
return false;
// Get opcode and regclass of the output for the given load instruction.
@ -979,9 +996,7 @@ unsigned X86FastISel::TargetMaterializeConstant(Constant *C,
unsigned ResultReg = createResultReg(RC);
if (isa<GlobalValue>(C)) {
// FIXME: If store value type is something we can't handle, this can result
// in a dead stub load instruction.
if (X86SelectConstAddr(C, ResultReg))
if (X86SelectConstAddr(C, ResultReg, false, true))
return ResultReg;
return 0;
}

View File

@ -1,5 +1,7 @@
; RUN: llvm-as < %s | llc -fast-isel -mtriple=i386-apple-darwin -mattr=sse2 | \
; RUN: grep mov | grep lazy_ptr | count 1
; RUN: llvm-as < %s | llc -fast-isel -mtriple=i386-apple-darwin | \
; RUN: grep mov | grep lazy_ptr | count 2
; RUN: llvm-as < %s | llc -fast-isel -march=x86 -relocation-model=static | \
; RUN: grep lea
@src = external global i32
@ -8,6 +10,15 @@ entry:
%0 = load i32* @src, align 4
%1 = load i32* @src, align 4
%2 = add i32 %0, %1
store i32 %2, i32* @src
store i32 %2, i32* @src
ret i32 %2
}
; Regression case for fast-isel constant lowering: the store's address
; operand is a ConstantExpr getelementptr into an external global, not a
; plain GlobalValue, so fast-isel must materialize the folded constant
; address rather than bailing out.
%stuff = type { i32 (...)** }
@LotsStuff = external constant [4 x i32 (...)*]
; NOTE(review): the destination pointer is literal null — this test only
; checks that selection succeeds, not that the store target is meaningful.
define void @t(%stuff* %this) nounwind {
entry:
store i32 (...)** getelementptr ([4 x i32 (...)*]* @LotsStuff, i32 0, i32 2), i32 (...)*** null, align 4
ret void
}