Recommit the fix for rdar://9289512 with a couple of tweaks to fix bugs
exposed by the gcc dejagnu testsuite:
1. The load may actually be used by a dead instruction, which
   would cause an assert.
2. The load may not be used by the current chain of instructions,
   and we could move it past a side-effecting instruction. Change
   how we process uses to define the problem away.
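
For reference, here is a minimal LLVM IR sketch of the two failure modes above
(function and symbol names are illustrative only, not part of the commit; the
new regression tests in the diff below exercise the same patterns):

; 1. The load's only user is itself dead (its result is unused), so FastISel
;    never assigns the load a vreg, and the old code hit the assert.
define void @dead_use_sketch(i32* %p) nounwind {
  %v = load i32* %p
  %dead = xor i32 %v, -1
  ret void
}

; 2. The load's single use is a compare that would be folded, but a
;    side-effecting call sits between the compare and the branch, so folding
;    would move the load past the call.
declare void @clobber()

define i32 @fold_past_call_sketch(i32* %p) nounwind {
entry:
  %v = load i32* %p
  %c = icmp ne i32 %v, 5
  call void @clobber()
  br i1 %c, label %yes, label %no

yes:
  ret i32 1

no:
  ret i32 0
}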



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@130018 91177308-0d34-0410-b5e6-96231b3b80d8
Chris Lattner 2011-04-22 21:59:37 +00:00
parent 597fa65373
commit b686af053e
4 changed files with 113 additions and 18 deletions


@@ -280,7 +280,8 @@ private:
   void PrepareEHLandingPad();
   void SelectAllBasicBlocks(const Function &Fn);
-  bool TryToFoldFastISelLoad(const LoadInst *LI, FastISel *FastIS);
+  bool TryToFoldFastISelLoad(const LoadInst *LI, const Instruction *FoldInst,
+                             FastISel *FastIS);
   void FinishBasicBlock();
   void SelectBasicBlock(BasicBlock::const_iterator Begin,


@@ -746,16 +746,49 @@ void SelectionDAGISel::PrepareEHLandingPad() {
 /// TryToFoldFastISelLoad - We're checking to see if we can fold the specified
+/// load into the specified FoldInst.  Note that we could have a sequence where
+/// multiple LLVM IR instructions are folded into the same machineinstr.  For
+/// example we could have:
+///   A: x = load i32 *P
+///   B: y = icmp A, 42
+///   C: br y, ...
+///
+/// In this scenario, LI is "A", and FoldInst is "C".  We know about "B" (and
+/// any other folded instructions) because it is between A and C.
+///
+/// If we succeed in folding the load into the operation, return true.
+///
 bool SelectionDAGISel::TryToFoldFastISelLoad(const LoadInst *LI,
+                                             const Instruction *FoldInst,
                                              FastISel *FastIS) {
+  // We know that the load has a single use, but don't know what it is.  If it
+  // isn't one of the folded instructions, then we can't succeed here.  Handle
+  // this by scanning the single-use users of the load until we get to FoldInst.
+  unsigned MaxUsers = 6;  // Don't scan down huge single-use chains of instrs.
+  const Instruction *TheUser = LI->use_back();
+  while (TheUser != FoldInst &&   // Scan up until we find FoldInst.
+         // Stay in the right block.
+         TheUser->getParent() == FoldInst->getParent() &&
+         --MaxUsers) {  // Don't scan too far.
+    // If there are multiple or no uses of this instruction, then bail out.
+    if (!TheUser->hasOneUse())
+      return false;
+    TheUser = TheUser->use_back();
+  }
+
   // Don't try to fold volatile loads.  Target has to deal with alignment
   // constraints.
   if (LI->isVolatile()) return false;
 
-  // Figure out which vreg this is going into.
+  // Figure out which vreg this is going into.  If there is no assigned vreg yet
+  // then there actually was no reference to it.  Perhaps the load is referenced
+  // by a dead instruction.
   unsigned LoadReg = FastIS->getRegForValue(LI);
-  assert(LoadReg && "Load isn't already assigned a vreg? ");
+  if (LoadReg == 0)
+    return false;
 
   // Check to see what the uses of this vreg are.  If it has no uses, or more
   // than one use (at the machine instr level) then we can't fold it.
@@ -833,10 +866,10 @@ static void CheckLineNumbers(const MachineBasicBlock *MBB) {
 /// Return false if it needs to be emitted.
 static bool isFoldedOrDeadInstruction(const Instruction *I,
                                       FunctionLoweringInfo *FuncInfo) {
-  return !I->mayWriteToMemory() &&
-         !isa<TerminatorInst>(I) &&
-         !isa<DbgInfoIntrinsic>(I) &&
-         !FuncInfo->isExportedInst(I);
+  return !I->mayWriteToMemory() &&      // Side-effecting instructions aren't folded.
+         !isa<TerminatorInst>(I) &&     // Terminators aren't folded.
+         !isa<DbgInfoIntrinsic>(I) &&   // Debug instructions aren't folded.
+         !FuncInfo->isExportedInst(I);  // Exported instrs must be computed.
 }
 
 void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
@@ -928,16 +961,20 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
       // Try to select the instruction with FastISel.
       if (FastIS->SelectInstruction(Inst)) {
-        // If fast isel succeeded, check to see if there is a single-use
-        // non-volatile load right before the selected instruction, and see if
-        // the load is used by the instruction.  If so, try to fold it.
-        const Instruction *BeforeInst = 0;
-        if (Inst != Begin)
-          BeforeInst = llvm::prior(llvm::prior(BI));
-        if (BeforeInst && isa<LoadInst>(BeforeInst) &&
-            BeforeInst->hasOneUse() && *BeforeInst->use_begin() == Inst &&
-            TryToFoldFastISelLoad(cast<LoadInst>(BeforeInst), FastIS))
-          --BI;  // If we succeeded, don't re-select the load.
+        // If fast isel succeeded, skip over all the folded instructions, and
+        // then see if there is a load right before the selected instructions.
+        // Try to fold the load if so.
+        const Instruction *BeforeInst = Inst;
+        while (BeforeInst != Begin) {
+          BeforeInst = llvm::prior(BasicBlock::const_iterator(BeforeInst));
+          if (!isFoldedOrDeadInstruction(BeforeInst, FuncInfo))
+            break;
+        }
+        if (BeforeInst != Inst && isa<LoadInst>(BeforeInst) &&
+            BeforeInst->hasOneUse() &&
+            TryToFoldFastISelLoad(cast<LoadInst>(BeforeInst), Inst, FastIS))
+          // If we succeeded, don't re-select the load.
+          BI = llvm::next(BasicBlock::const_iterator(BeforeInst));
 
         continue;
       }


@@ -14,6 +14,28 @@ define i32 @test1(i32 %i) nounwind ssp {
 ; CHECK: andl $8,
 
+; rdar://9289512 - The load should fold into the compare.
+define void @test2(i64 %x) nounwind ssp {
+entry:
+  %x.addr = alloca i64, align 8
+  store i64 %x, i64* %x.addr, align 8
+  %tmp = load i64* %x.addr, align 8
+  %cmp = icmp sgt i64 %tmp, 42
+  br i1 %cmp, label %if.then, label %if.end
+
+if.then:                                          ; preds = %entry
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %entry
+  ret void
+; CHECK: test2:
+; CHECK: movq %rdi, -8(%rsp)
+; CHECK: cmpq $42, -8(%rsp)
+}
 
 @G = external global i32
 define i64 @test3() nounwind {
   %A = ptrtoint i32* @G to i64
@@ -178,3 +200,28 @@ block2:
   call void (...)* @test16callee(double 1.000000e+00)
   ret void
 }
+
+declare void @foo() unnamed_addr ssp align 2
+
+; Verify that we don't fold the load into the compare here.  That would move it
+; w.r.t. the call.
+define i32 @test17(i32 *%P) ssp nounwind {
+entry:
+  %tmp = load i32* %P
+  %cmp = icmp ne i32 %tmp, 5
+  call void @foo()
+  br i1 %cmp, label %if.then, label %if.else
+
+if.then:                                          ; preds = %entry
+  ret i32 1
+
+if.else:                                          ; preds = %entry
+  ret i32 2
+; CHECK: test17:
+; CHECK: movl (%rdi), %eax
+; CHECK: callq _foo
+; CHECK: cmpl $5, %eax
+; CHECK-NEXT: je
+}


@@ -92,3 +92,13 @@ define void @load_store_i1(i1* %p, i1* %q) nounwind {
   store i1 %t, i1* %q
   ret void
 }
+
+@crash_test1x = external global <2 x i32>, align 8
+
+define void @crash_test1() nounwind ssp {
+  %tmp = load <2 x i32>* @crash_test1x, align 8
+  %neg = xor <2 x i32> %tmp, <i32 -1, i32 -1>
+  ret void
+}