[Inliner] Don't inline functions with frameescape calls
Inlining a function that calls llvm.frameescape is very difficult, since you would need to simultaneously transform many calls to llvm.framerecover and potentially duplicate the functions containing them. Normally this intrinsic isn't added until EH preparation, which is part of the backend pass pipeline and runs after inlining. However, if such a function were fed through the inliner, this change ensures that inlining doesn't break the code.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@234937 91177308-0d34-0410-b5e6-96231b3b80d8
commit ecc4595ce4
parent 2e6716eb55
lib/Analysis/InlineCost.cpp
@@ -64,6 +64,7 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
   bool ContainsNoDuplicateCall;
   bool HasReturn;
   bool HasIndirectBr;
+  bool HasFrameEscape;
 
   /// Number of bytes allocated statically by the callee.
   uint64_t AllocatedSize;
@@ -148,12 +149,12 @@ public:
         IsCallerRecursive(false), IsRecursiveCall(false),
         ExposesReturnsTwice(false), HasDynamicAlloca(false),
         ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
-        AllocatedSize(0), NumInstructions(0), NumVectorInstructions(0),
-        FiftyPercentVectorBonus(0), TenPercentVectorBonus(0), VectorBonus(0),
-        NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
-        NumConstantPtrCmps(0), NumConstantPtrDiffs(0),
-        NumInstructionsSimplified(0), SROACostSavings(0),
-        SROACostSavingsLost(0) {}
+        HasFrameEscape(false), AllocatedSize(0), NumInstructions(0),
+        NumVectorInstructions(0), FiftyPercentVectorBonus(0),
+        TenPercentVectorBonus(0), VectorBonus(0), NumConstantArgs(0),
+        NumConstantOffsetPtrArgs(0), NumAllocaArgs(0), NumConstantPtrCmps(0),
+        NumConstantPtrDiffs(0), NumInstructionsSimplified(0),
+        SROACostSavings(0), SROACostSavingsLost(0) {}
 
   bool analyzeCall(CallSite CS);
@@ -743,6 +744,9 @@ bool CallAnalyzer::visitCallSite(CallSite CS) {
       case Intrinsic::memmove:
         // SROA can usually chew through these intrinsics, but they aren't free.
         return false;
+      case Intrinsic::frameescape:
+        HasFrameEscape = true;
+        return false;
       }
     }
@@ -941,7 +945,7 @@ bool CallAnalyzer::analyzeBlock(BasicBlock *BB,
 
     // If the visit this instruction detected an uninlinable pattern, abort.
     if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
-        HasIndirectBr)
+        HasIndirectBr || HasFrameEscape)
       return false;
 
     // If the caller is a recursive function then we don't want to inline
@@ -1171,7 +1175,7 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
    // returns false, and we can bail on out.
    if (!analyzeBlock(BB, EphValues)) {
      if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
-         HasIndirectBr)
+         HasIndirectBr || HasFrameEscape)
        return false;
 
      // If the caller is a recursive function then we don't want to inline
@@ -1372,6 +1376,13 @@ bool InlineCostAnalysis::isInlineViable(Function &F) {
       if (!ReturnsTwice && CS.isCall() &&
           cast<CallInst>(CS.getInstruction())->canReturnTwice())
         return false;
+
+      // Disallow inlining functions that call @llvm.frameescape. Doing this
+      // correctly would require major changes to the inliner.
+      if (CS.getCalledFunction() &&
+          CS.getCalledFunction()->getIntrinsicID() ==
+              llvm::Intrinsic::frameescape)
+        return false;
     }
   }
test/Transforms/Inline/frameescape.ll (new file, 44 lines)
@@ -0,0 +1,44 @@
+; RUN: opt -inline -S < %s | FileCheck %s
+
+; PR23216: We can't inline functions using llvm.frameescape.
+
+declare void @llvm.frameescape(...)
+declare i8* @llvm.frameaddress(i32)
+declare i8* @llvm.framerecover(i8*, i8*, i32)
+
+define internal void @foo(i8* %fp) {
+  %a.i8 = call i8* @llvm.framerecover(i8* bitcast (i32 ()* @bar to i8*), i8* %fp, i32 0)
+  %a = bitcast i8* %a.i8 to i32*
+  store i32 42, i32* %a
+  ret void
+}
+
+define internal i32 @bar() {
+entry:
+  %a = alloca i32
+  call void (...)* @llvm.frameescape(i32* %a)
+  %fp = call i8* @llvm.frameaddress(i32 0)
+  tail call void @foo(i8* %fp)
+  %r = load i32, i32* %a
+  ret i32 %r
+}
+
+; We even bail when someone marks it alwaysinline.
+define internal i32 @bar_alwaysinline() alwaysinline {
+entry:
+  %a = alloca i32
+  call void (...)* @llvm.frameescape(i32* %a)
+  tail call void @foo(i8* null)
+  ret i32 0
+}
+
+define i32 @bazz() {
+entry:
+  %r = tail call i32 @bar()
+  %r1 = tail call i32 @bar_alwaysinline()
+  ret i32 %r
+}
+
+; CHECK: define i32 @bazz()
+; CHECK: call i32 @bar()
+; CHECK: call i32 @bar_alwaysinline()
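The test also shows why inlining is off the table: @llvm.framerecover names the parent function (@bar) as a constant symbol, so the escaped frame layout is tied to the identity of the function containing the @llvm.frameescape call. What follows is a hypothetical sketch, not part of this commit, of what @bazz would look like if the inliner inlined @bar anyway; the names are carried over from the test above, and the @bar_alwaysinline call is omitted for brevity.

; Hypothetical, broken result of inlining @bar into @bazz. The
; frameescape now executes in @bazz's frame, but @foo still calls
; @llvm.framerecover with @bar as the parent, so it would resolve %a
; against a frame that no longer exists. Repairing this would mean
; rewriting every @llvm.framerecover call that names @bar and
; potentially duplicating @foo, which is exactly the transformation
; the commit message calls very difficult.
define i32 @bazz() {
entry:
  %a.i = alloca i32                              ; inlined from @bar
  call void (...)* @llvm.frameescape(i32* %a.i)  ; escapes @bazz's frame now
  %fp = call i8* @llvm.frameaddress(i32 0)       ; frame address of @bazz, not @bar
  tail call void @foo(i8* %fp)
  %r = load i32, i32* %a.i
  ret i32 %r
}

Note that the change guards both inlining paths: CallAnalyzer::visitCallSite sets HasFrameEscape so the cost-based checks in analyzeBlock and analyzeCall bail out, while the isInlineViable check rejects such callees outright, which is what stops even the alwaysinline case (@bar_alwaysinline) from being inlined.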