Fix PR2076. CodeGenPrepare now sinks address computation for inline asm memory
operands into the inline asm block.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@47589 91177308-0d34-0410-b5e6-96231b3b80d8
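For context, here is a minimal sketch of the kind of code this patch targets (a hypothetical reduction, not the actual PR2076 test case; zero_slot and its parameters are invented, and x86 AT&T syntax is assumed for the asm string). The address &buf[i] is computed in the entry block but is only used as the "m" memory operand of the asm in the taken branch, so CodeGenPrepare can now sink the address computation into the asm's block:

void zero_slot(int *buf, int i, int cond) {
  int *p = &buf[i];   // address computed in the entry block ...
  if (cond)           // ... but only used by the asm's "m" operand here,
    asm volatile("movl $0, %0" : "=m"(*p));   // so the GEP can be sunk to this block
}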
@@ -18,6 +18,7 @@
 #include "llvm/Constants.h"
 #include "llvm/DerivedTypes.h"
 #include "llvm/Function.h"
+#include "llvm/InlineAsm.h"
 #include "llvm/Instructions.h"
 #include "llvm/Pass.h"
 #include "llvm/Target/TargetAsmInfo.h"
@@ -28,6 +29,7 @@
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallSet.h"
+#include "llvm/Support/CallSite.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/Debug.h"
@@ -58,6 +60,8 @@ namespace {
     bool OptimizeLoadStoreInst(Instruction *I, Value *Addr,
                                const Type *AccessTy,
                                DenseMap<Value*,Value*> &SunkAddrs);
+    bool OptimizeInlineAsmInst(Instruction *I, CallSite CS,
+                               DenseMap<Value*,Value*> &SunkAddrs);
     bool OptimizeExtUses(Instruction *I);
   };
 }
@@ -928,6 +932,54 @@ bool CodeGenPrepare::OptimizeLoadStoreInst(Instruction *LdStInst, Value *Addr,
   return true;
 }
 
+/// OptimizeInlineAsmInst - If there are any memory operands, use
+/// OptimizeLoadStoreInst to sink their address computing into the block when
+/// possible / profitable.
+bool CodeGenPrepare::OptimizeInlineAsmInst(Instruction *I, CallSite CS,
+                                           DenseMap<Value*,Value*> &SunkAddrs) {
+  bool MadeChange = false;
+  InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
+
+  // Do a prepass over the constraints, canonicalizing them, and building up
+  // the ConstraintOperands list.
+  std::vector<InlineAsm::ConstraintInfo>
+    ConstraintInfos = IA->ParseConstraints();
+
+  /// ConstraintOperands - Information about all of the constraints.
+  std::vector<TargetLowering::AsmOperandInfo> ConstraintOperands;
+  unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
+  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
+    ConstraintOperands.
+      push_back(TargetLowering::AsmOperandInfo(ConstraintInfos[i]));
+    TargetLowering::AsmOperandInfo &OpInfo = ConstraintOperands.back();
+
+    // Compute the value type for each operand.
+    switch (OpInfo.Type) {
+    case InlineAsm::isOutput:
+      if (OpInfo.isIndirect)
+        OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
+      break;
+    case InlineAsm::isInput:
+      OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
+      break;
+    case InlineAsm::isClobber:
+      // Nothing to do.
+      break;
+    }
+
+    // Compute the constraint code and ConstraintType to use.
+    OpInfo.ComputeConstraintToUse(*TLI);
+
+    if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
+      Value *OpVal = OpInfo.CallOperandVal;
+      MadeChange |= OptimizeLoadStoreInst(I, OpVal, OpVal->getType(),
+                                          SunkAddrs);
+    }
+  }
+
+  return MadeChange;
+}
+
 bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
   BasicBlock *DefBB = I->getParent();
 
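The ArgNo bookkeeping in the loop above relies on how inline asm operands map onto the call's argument list: indirect outputs and all inputs each consume one argument, while direct outputs (which are results of the call) and clobbers consume none. A standalone sketch of that rule (illustration only, not LLVM API code; all names are invented):

// Mirrors the switch above: only operands actually passed as call
// arguments advance ArgNo.
enum OperandKind { Output, Input, Clobber };

static bool consumesArgument(OperandKind Kind, bool IsIndirect) {
  switch (Kind) {
  case Output:  return IsIndirect;  // direct outputs are return values, not arguments
  case Input:   return true;        // every input is a call argument
  case Clobber: return false;       // clobbers never take arguments
  }
  return false;
}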
@@ -1076,6 +1128,9 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
             TLI->getTargetMachine().getTargetAsmInfo()) {
           if (TAI->ExpandInlineAsm(CI))
             BBI = BB.begin();
+          else
+            // Sink address computing for memory operands into the block.
+            MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
         }
       }
     }
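For orientation, the three added lines sit under OptimizeBlock's existing inline asm handling: when the target can expand the asm into plain IR, the block is rescanned; otherwise the new address-sinking pass runs. A sketch of the enclosing code, reconstructed from the context lines above (the outer dyn_cast/isa checks are an assumption, since this hunk does not show them):

// Assumed shape of the enclosing code in OptimizeBlock; only the
// else branch is new in this commit.
if (CallInst *CI = dyn_cast<CallInst>(I)) {
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (const TargetAsmInfo *TAI =
            TLI->getTargetMachine().getTargetAsmInfo()) {
      if (TAI->ExpandInlineAsm(CI))
        BBI = BB.begin();   // asm was rewritten into plain IR; rescan the block
      else
        // Sink address computing for memory operands into the block.
        MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
    }
  }
}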