--- Merging r127981 into '.':

U    include/llvm/Target/TargetLowering.h
U    lib/Target/X86/X86ISelLowering.cpp
U    lib/Target/X86/X86ISelLowering.h
U    lib/Target/ARM/ARMISelLowering.h
U    lib/Target/ARM/ARMISelLowering.cpp
U    lib/Transforms/Scalar/CodeGenPrepare.cpp
--- Merging r128194 into '.':
G    lib/Transforms/Scalar/CodeGenPrepare.cpp
--- Merging r128196 into '.':
G    lib/Transforms/Scalar/CodeGenPrepare.cpp
--- Merging r128197 into '.':
A    test/CodeGen/X86/tailcall-returndup-void.ll
G    lib/Transforms/Scalar/CodeGenPrepare.cpp

git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_29@128200 91177308-0d34-0410-b5e6-96231b3b80d8
Bill Wendling 2011-03-24 06:00:53 +00:00
parent 450fcbb230
commit 962a797052
7 changed files with 215 additions and 8 deletions

include/llvm/Target/TargetLowering.h

@@ -1287,6 +1287,14 @@ public:
return false;
}
/// mayBeEmittedAsTailCall - Return true if the target may be able to emit the
/// call instruction as a tail call. This is used by optimization passes to
/// determine if it's profitable to duplicate return instructions to enable
/// tailcall optimization.
virtual bool mayBeEmittedAsTailCall(CallInst *CI) const {
return false;
}
/// LowerOperationWrapper - This callback is invoked by the type legalizer
/// to legalize nodes with an illegal operand type but legal result types.
/// It replaces the LowerOperation callback in the type Legalizer.
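
The default implementation returns false, so each target must opt in. As an IR-level illustration (a sketch written for this note, not code from the commit), the hook vets calls of this shape, where a call already marked `tail` produces nothing but the returned value:

    ; Hypothetical IR: the tail-marked call's only use is the return value.
    ; A target that returns true from mayBeEmittedAsTailCall for this call
    ; lets CodeGenPrepare treat it as a candidate for return duplication.
    declare i32 @callee()

    define i32 @caller() {
    entry:
      %r = tail call i32 @callee()
      ret i32 %r
    }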

lib/Target/ARM/ARMISelLowering.cpp

@@ -1803,6 +1803,16 @@ bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N) const {
return HasRet;
}
bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
if (!EnableARMTailCalls)
return false;
if (!CI->isTailCall())
return false;
return !Subtarget->isThumb1Only();
}
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
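
The ARM override above is gated three ways: the EnableARMTailCalls option must be set, the call must already be marked `tail`, and Thumb1-only subtargets are rejected. A hedged sketch of IR that could pass those checks on a non-Thumb1 ARM target (the triple is illustrative only):

    ; Assuming tail calls are enabled for the ARM backend, this tail-marked
    ; call is accepted unless the subtarget is Thumb1-only.
    target triple = "armv7-none-linux-gnueabi"

    declare i32 @g()

    define i32 @f() {
    entry:
      %v = tail call i32 @g()
      ret i32 %v
    }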

lib/Target/ARM/ARMISelLowering.h

@@ -455,6 +455,8 @@ namespace llvm {
virtual bool isUsedByReturnOnly(SDNode *N) const;
virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;
SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
SDValue &ARMcc, SelectionDAG &DAG, DebugLoc dl) const;
SDValue getVFPCmp(SDValue LHS, SDValue RHS,

lib/Target/X86/X86ISelLowering.cpp

@@ -45,6 +45,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
@@ -1580,6 +1581,18 @@ static bool IsTailCallConvention(CallingConv::ID CC) {
return (CC == CallingConv::Fast || CC == CallingConv::GHC);
}
bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
if (!CI->isTailCall())
return false;
CallSite CS(CI);
CallingConv::ID CalleeCC = CS.getCallingConv();
if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C)
return false;
return true;
}
/// FuncIsMadeTailCallSafe - Return true if the function is being made into
/// a tailcall target by changing its ABI.
static bool FuncIsMadeTailCallSafe(CallingConv::ID CC) {
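
The X86 override accepts the guaranteed-tail-call conventions checked by IsTailCallConvention (fastcc and GHC's cc 10) plus the plain C calling convention. The test added by this commit exercises cc 10; a smaller hedged sketch using fastcc would look like:

    ; fastcc satisfies IsTailCallConvention, so this tail-marked call
    ; makes mayBeEmittedAsTailCall return true on X86.
    declare fastcc i32 @g()

    define fastcc i32 @f() {
    entry:
      %v = tail call fastcc i32 @g()
      ret i32 %v
    }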

lib/Target/X86/X86ISelLowering.h

@@ -843,6 +843,8 @@ namespace llvm {
virtual bool isUsedByReturnOnly(SDNode *N) const;
virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;
virtual bool
CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,

lib/Transforms/Scalar/CodeGenPrepare.cpp

@@ -47,16 +47,17 @@ using namespace llvm;
using namespace llvm::PatternMatch;
STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
"sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
"of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
"computations were sunk");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
namespace {
class CodeGenPrepare : public FunctionPass {
@@ -71,11 +72,15 @@ namespace {
/// update it.
BasicBlock::iterator CurInstIterator;
/// Keeps track of non-local addresses that have been sunk into a block.
/// This allows us to avoid inserting duplicate code for blocks with
/// multiple load/stores of the same address.
DenseMap<Value*, Value*> SunkAddrs;
/// UpdateDT - If the CFG is modified in any way, the dominator tree may need to
/// be updated.
bool UpdateDT;
public:
static char ID; // Pass identification, replacement for typeid
explicit CodeGenPrepare(const TargetLowering *tli = 0)
@@ -100,6 +105,7 @@ namespace {
bool OptimizeCallInst(CallInst *CI);
bool MoveExtToFormExtLoad(Instruction *I);
bool OptimizeExtUses(Instruction *I);
bool DupRetToEnableTailCallOpts(ReturnInst *RI);
};
}
@@ -114,8 +120,10 @@ FunctionPass *llvm::createCodeGenPreparePass(const TargetLowering *TLI) {
bool CodeGenPrepare::runOnFunction(Function &F) {
bool EverMadeChange = false;
UpdateDT = false;
DT = getAnalysisIfAvailable<DominatorTree>();
PFI = getAnalysisIfAvailable<ProfileInfo>();
// First pass, eliminate blocks that contain only PHI nodes and an
// unconditional branch.
EverMadeChange |= EliminateMostlyEmptyBlocks(F);
@@ -123,13 +131,18 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
bool MadeChange = true;
while (MadeChange) {
MadeChange = false;
for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
BasicBlock *BB = I++;
MadeChange |= OptimizeBlock(*BB);
}
EverMadeChange |= MadeChange;
}
SunkAddrs.clear();
if (UpdateDT && DT)
DT->DT->recalculate(F);
return EverMadeChange;
}
@@ -533,6 +546,125 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
return Simplifier.fold(CI, TD);
}
/// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return
/// instructions to the predecessor to enable tail call optimizations. The
/// case it is currently looking for is:
/// bb0:
/// %tmp0 = tail call i32 @f0()
/// br label %return
/// bb1:
/// %tmp1 = tail call i32 @f1()
/// br label %return
/// bb2:
/// %tmp2 = tail call i32 @f2()
/// br label %return
/// return:
/// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
/// ret i32 %retval
///
/// =>
///
/// bb0:
/// %tmp0 = tail call i32 @f0()
/// ret i32 %tmp0
/// bb1:
/// %tmp1 = tail call i32 @f1()
/// ret i32 %tmp1
/// bb2:
/// %tmp2 = tail call i32 @f2()
/// ret i32 %tmp2
///
bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) {
if (!TLI)
return false;
Value *V = RI->getReturnValue();
PHINode *PN = V ? dyn_cast<PHINode>(V) : NULL;
if (V && !PN)
return false;
BasicBlock *BB = RI->getParent();
if (PN && PN->getParent() != BB)
return false;
// It's not safe to eliminate the sign / zero extension of the return value.
// See llvm::isInTailCallPosition().
const Function *F = BB->getParent();
unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
return false;
// Make sure there are no instructions between the PHI and return, or that the
// return is the first instruction in the block.
if (PN) {
BasicBlock::iterator BI = BB->begin();
do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
if (&*BI != RI)
return false;
} else {
if (&*BB->begin() != RI)
return false;
}
/// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
/// call.
SmallVector<CallInst*, 4> TailCalls;
if (PN) {
for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
// Make sure the phi value is indeed produced by the tail call.
if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
TLI->mayBeEmittedAsTailCall(CI))
TailCalls.push_back(CI);
}
} else {
SmallPtrSet<BasicBlock*, 4> VisitedBBs;
for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
if (!VisitedBBs.insert(*PI))
continue;
BasicBlock::InstListType &InstList = (*PI)->getInstList();
BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
if (++RI == RE)
continue;
CallInst *CI = dyn_cast<CallInst>(&*RI);
if (CI && CI->getType()->isVoidTy() && TLI->mayBeEmittedAsTailCall(CI))
TailCalls.push_back(CI);
}
}
bool Changed = false;
for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
CallInst *CI = TailCalls[i];
CallSite CS(CI);
// Conservatively require the attributes of the call to match those of the
// return. Ignore noalias because it doesn't affect the call sequence.
unsigned CalleeRetAttr = CS.getAttributes().getRetAttributes();
if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
continue;
// Make sure the call instruction is followed by an unconditional branch to
// the return block.
BasicBlock *CallBB = CI->getParent();
BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
continue;
// Duplicate the return into CallBB.
(void)FoldReturnIntoUncondBranch(RI, BB, CallBB);
UpdateDT = Changed = true;
++NumRetsDup;
}
// If we eliminated all predecessors of the block, delete the block now.
if (Changed && pred_begin(BB) == pred_end(BB))
BB->eraseFromParent();
return Changed;
}
//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//
@@ -956,6 +1088,9 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I) {
if (CallInst *CI = dyn_cast<CallInst>(I))
return OptimizeCallInst(CI);
if (ReturnInst *RI = dyn_cast<ReturnInst>(I))
return DupRetToEnableTailCallOpts(RI);
return false;
}
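
One bail-out in DupRetToEnableTailCallOpts deserves emphasis: when the caller's return value carries a zeroext or sext attribute, the pass gives up, since dropping the extension is unsafe (see llvm::isInTailCallPosition). A hedged IR sketch of a function the pass must therefore skip, even though it otherwise matches the documented pattern:

    ; The caller promises a zero-extended result, so the CallerRetAttr
    ; check bails out and the ret is never duplicated into %bb0 or %bb1.
    declare i8 @f0()
    declare i8 @f1()

    define zeroext i8 @caller(i1 %c) {
    entry:
      br i1 %c, label %bb0, label %bb1
    bb0:
      %t0 = tail call i8 @f0()
      br label %return
    bb1:
      %t1 = tail call i8 @f1()
      br label %return
    return:
      %r = phi i8 [ %t0, %bb0 ], [ %t1, %bb1 ]
      ret i8 %r
    }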

test/CodeGen/X86/tailcall-returndup-void.ll

@@ -0,0 +1,37 @@
; RUN: llc < %s -march=x86-64 | FileCheck %s
; CHECK: rBM_info
; CHECK-NOT: ret
@sES_closure = external global [0 x i64]
declare cc10 void @sEH_info(i64* noalias nocapture, i64* noalias nocapture, i64* noalias nocapture, i64, i64, i64) align 8
define cc10 void @rBM_info(i64* noalias nocapture %Base_Arg, i64* noalias nocapture %Sp_Arg, i64* noalias nocapture %Hp_Arg, i64 %R1_Arg, i64 %R2_Arg, i64 %R3_Arg) nounwind align 8 {
c263:
%ln265 = getelementptr inbounds i64* %Sp_Arg, i64 -2
%ln266 = ptrtoint i64* %ln265 to i64
%ln268 = icmp ult i64 %ln266, %R3_Arg
br i1 %ln268, label %c26a, label %n26p
n26p: ; preds = %c263
br i1 icmp ne (i64 and (i64 ptrtoint ([0 x i64]* @sES_closure to i64), i64 7), i64 0), label %c1ZP.i, label %n1ZQ.i
n1ZQ.i: ; preds = %n26p
%ln1ZT.i = load i64* getelementptr inbounds ([0 x i64]* @sES_closure, i64 0, i64 0), align 8
%ln1ZU.i = inttoptr i64 %ln1ZT.i to void (i64*, i64*, i64*, i64, i64, i64)*
tail call cc10 void %ln1ZU.i(i64* %Base_Arg, i64* %Sp_Arg, i64* %Hp_Arg, i64 ptrtoint ([0 x i64]* @sES_closure to i64), i64 ptrtoint ([0 x i64]* @sES_closure to i64), i64 %R3_Arg) nounwind
br label %rBL_info.exit
c1ZP.i: ; preds = %n26p
tail call cc10 void @sEH_info(i64* %Base_Arg, i64* %Sp_Arg, i64* %Hp_Arg, i64 ptrtoint ([0 x i64]* @sES_closure to i64), i64 ptrtoint ([0 x i64]* @sES_closure to i64), i64 %R3_Arg) nounwind
br label %rBL_info.exit
rBL_info.exit: ; preds = %c1ZP.i, %n1ZQ.i
ret void
c26a: ; preds = %c263
%ln27h = getelementptr inbounds i64* %Base_Arg, i64 -2
%ln27j = load i64* %ln27h, align 8
%ln27k = inttoptr i64 %ln27j to void (i64*, i64*, i64*, i64, i64, i64)*
tail call cc10 void %ln27k(i64* %Base_Arg, i64* %Sp_Arg, i64* %Hp_Arg, i64 %R1_Arg, i64 %R2_Arg, i64 %R3_Arg) nounwind
ret void
}