LSR: handle the expansion of phi operands that use postinc forms of the IV.
Fixes PR11431:
SCEVExpander::expandAddRecExprLiterally(const llvm::SCEVAddRecExpr*):
Assertion `(!isa<Instruction>(Result) || SE.DT->dominates(cast<Instruction>(Result), Builder.GetInsertPoint())) && "postinc expansion does not dominate use"' failed.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@145482 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent caebb1e00b
commit 553fe05f23
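In outline, the patch factors IV-increment emission out of getAddRecExprPHILiterally into a new helper, expandIVInc, and replaces the assertion in expandAddRecExprLiterally with a fallback that emits one extra IV increment when the postinc expansion does not dominate the use. The condensed sketch below is assembled from the hunks that follow rather than quoted verbatim; the names in scope (Result, Step, PN, ExpandTy, IntTy, L, Builder, SE) are the existing locals and members of expandAddRecExprLiterally, and the insert-point save/restore around expandCodeFor is omitted here for brevity.

// Condensed sketch of the new fallback path (simplified from the diff below).
if (isa<Instruction>(Result) &&
    !SE.DT->dominates(cast<Instruction>(Result), Builder.GetInsertPoint())) {
  // The postinc expansion does not dominate this use: negate a non-constant
  // negative step, expand it above the loop header, and emit one extra
  // increment at the caller's insertion point.
  bool useSubtract = !ExpandTy->isPointerTy() && isNonConstantNegative(Step);
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
  Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
}

The full hunk below additionally saves and restores the Builder's insertion point around the expandCodeFor call, so the extra increment is emitted at the position the caller has determined dominates all uses.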
@@ -244,6 +244,8 @@ namespace llvm {
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy);
+    Value *expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
+                       Type *ExpandTy, Type *IntTy, bool useSubtract);
   };
 }
 
@@ -929,6 +929,36 @@ bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
   }
 }
 
+/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
+/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
+/// need to materialize IV increments elsewhere to handle difficult situations.
+Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
+                                 Type *ExpandTy, Type *IntTy,
+                                 bool useSubtract) {
+  Value *IncV;
+  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
+  if (ExpandTy->isPointerTy()) {
+    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
+    // If the step isn't constant, don't use an implicitly scaled GEP, because
+    // that would require a multiply inside the loop.
+    if (!isa<ConstantInt>(StepV))
+      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
+                                  GEPPtrTy->getAddressSpace());
+    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
+    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
+    if (IncV->getType() != PN->getType()) {
+      IncV = Builder.CreateBitCast(IncV, PN->getType());
+      rememberInstruction(IncV);
+    }
+  } else {
+    IncV = useSubtract ?
+      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
+      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
+    rememberInstruction(IncV);
+  }
+  return IncV;
+}
+
 /// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
 /// the base addrec, which is the addrec without any non-loop-dominating
 /// values, and return the PHI.
@@ -993,16 +1023,16 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
           SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
                                    L->getHeader()));
 
-  // Expand code for the step value. Insert instructions right before the
-  // terminator corresponding to the back-edge. Do this before creating the PHI
-  // so that PHI reuse code doesn't see an incomplete PHI. If the stride is
-  // negative, insert a sub instead of an add for the increment (unless it's a
-  // constant, because subtracts of constants are canonicalized to adds).
+  // Expand code for the step value. Do this before creating the PHI so that PHI
+  // reuse code doesn't see an incomplete PHI.
   const SCEV *Step = Normalized->getStepRecurrence(SE);
-  bool isPointer = ExpandTy->isPointerTy();
-  bool isNegative = !isPointer && isNonConstantNegative(Step);
-  if (isNegative)
+  // If the stride is negative, insert a sub instead of an add for the increment
+  // (unless it's a constant, because subtracts of constants are canonicalized
+  // to adds).
+  bool useSubtract = !ExpandTy->isPointerTy() && isNonConstantNegative(Step);
+  if (useSubtract)
     Step = SE.getNegativeSCEV(Step);
+  // Expand the step somewhere that dominates the loop header.
   Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
 
   // Create the PHI.
@@ -1023,33 +1053,14 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
       continue;
     }
 
-    // Create a step value and add it to the PHI. If IVIncInsertLoop is
-    // non-null and equal to the addrec's loop, insert the instructions
-    // at IVIncInsertPos.
+    // Create a step value and add it to the PHI.
+    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
+    // instructions at IVIncInsertPos.
     Instruction *InsertPos = L == IVIncInsertLoop ?
       IVIncInsertPos : Pred->getTerminator();
     Builder.SetInsertPoint(InsertPos);
-    Value *IncV;
-    // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
-    if (isPointer) {
-      PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
-      // If the step isn't constant, don't use an implicitly scaled GEP, because
-      // that would require a multiply inside the loop.
-      if (!isa<ConstantInt>(StepV))
-        GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
-                                    GEPPtrTy->getAddressSpace());
-      const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
-      IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
-      if (IncV->getType() != PN->getType()) {
-        IncV = Builder.CreateBitCast(IncV, PN->getType());
-        rememberInstruction(IncV);
-      }
-    } else {
-      IncV = isNegative ?
-        Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
-        Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
-      rememberInstruction(IncV);
-    }
+    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
 
     PN->addIncoming(IncV, Pred);
   }
 
@@ -1124,10 +1135,31 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
     // For an expansion to use the postinc form, the client must call
     // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
-    assert((!isa<Instruction>(Result) ||
-            SE.DT->dominates(cast<Instruction>(Result),
-                             Builder.GetInsertPoint())) &&
-           "postinc expansion does not dominate use");
+    if (isa<Instruction>(Result)
+        && !SE.DT->dominates(cast<Instruction>(Result),
+                             Builder.GetInsertPoint())) {
+      // The induction variable's postinc expansion does not dominate this use.
+      // IVUsers tries to prevent this case, so it is rare. However, it can
+      // happen when an IVUser outside the loop is not dominated by the latch
+      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
+      // all cases. Consider a phi outside whose operand is replaced during
+      // expansion with the value of the postinc user. Without fundamentally
+      // changing the way postinc users are tracked, the only remedy is
+      // inserting an extra IV increment. StepV might fold into PostLoopOffset,
+      // but hopefully expandCodeFor handles that.
+      bool useSubtract =
+        !ExpandTy->isPointerTy() && isNonConstantNegative(Step);
+      if (useSubtract)
+        Step = SE.getNegativeSCEV(Step);
+      // Expand the step somewhere that dominates the loop header.
+      BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
+      BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
+      Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
+      // Restore the insertion point to the place where the caller has
+      // determined dominates all uses.
+      restoreInsertPoint(SaveInsertBB, SaveInsertPt);
+      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
+    }
   }
 
   // Re-apply any non-loop-dominating scale.
test/Transforms/LoopStrengthReduce/2011-11-29-postincphi.ll (new file, 56 lines)
@@ -0,0 +1,56 @@
; RUN: llc < %s | FileCheck %s
;
; PR11431: handle a phi operand that is replaced by a postinc user.
; LSR first expands %mul to %iv1 in %phi32
; LSR then expands %iv1 in %phi32 into two decrements, one on each loop exit.

target datalayout = "e-p:64:64:64-S128-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-f128:128:128-n8:16:32:64"
target triple = "x86_64-unknown-linux-gnu"

declare i1 @check() nounwind

; Check that LSR did something close to the behavior at the time of the bug.
; CHECK: @sqlite3DropTriggerPtr
; CHECK: addq $48, %rax
; CHECK: je
; CHECK: addq $-48, %rax
; CHECK: ret
define i64 @sqlite3DropTriggerPtr() {
entry:
  %cmp0 = call zeroext i1 @check()
  br i1 %cmp0, label %"8", label %"3"

"3":                                              ; preds = %entry
  br i1 %cmp0, label %"4", label %"8"

"4":                                              ; preds = %"3"
  br i1 %cmp0, label %"8", label %"5.preheader"

"5.preheader":                                    ; preds = %"4"
  br label %"5"

"5":
  %iv0 = phi i32 [ %iv0.inc, %"6" ], [ 0, %"5.preheader" ]
  %iv1 = phi i64 [ %iv1.inc, %"6" ], [ 48, %"5.preheader" ]
  %iv0.inc = add nsw i32 %iv0, 1
  %cmp = icmp eq i32 %iv0.inc, 0
  br i1 %cmp, label %"7", label %"6"

"6":
  %iv1.inc = add i64 %iv1, 48
  %iv1.ofs = add i64 %iv1, 40
  %ptr8 = getelementptr inbounds i8* undef, i64 %iv1.ofs
  %ptr32 = bitcast i8* %ptr8 to i32**
  %v = load i32** %ptr32, align 8
  br i1 %cmp0, label %"8", label %"5"

"7":
  %iv.inc64 = sext i32 %iv0.inc to i64
  %mul = mul i64 %iv.inc64, 48
  br label %"8"

"8":                                              ; preds = %"7", %"5", %"4", %"3", %entry
  %phi32 = phi i32 [ %iv0.inc, %"7" ], [ 0, %"4" ], [ 0, %"3" ], [ -1000000, %entry ], [ %iv0.inc, %"6" ]
  %phi64 = phi i64 [ %mul, %"7" ], [ 0, %"4" ], [ 0, %"3" ], [ -48000000, %entry ], [ %iv1, %"6" ]
  ret i64 %phi64
}