Fix fallout in CodeGenPrepare from 56526. Will likely need more work.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@56546 91177308-0d34-0410-b5e6-96231b3b80d8
commit 692bf6b85e
parent 2a5196fe67
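For context, the path touched here handles inline asm whose operands use a memory ('m') constraint: the new hasInlineAsmMemConstraint helper scans the parsed constraints for such an operand, and OptimizeInlineAsmInst then lets OptimizeLoadStoreInst sink the operand's address computation into the using block. A minimal source-level sketch of input that exercises this path, assuming an x86 target and GNU inline-asm syntax (the names below are illustrative, not from this commit):

  // 'counter' is bound through a "+m" (read/write memory) constraint, so the
  // asm operand is a memory reference whose address computation CodeGenPrepare
  // may sink into the block containing the asm.
  static int counter;
  void bump(void) {
    __asm__ volatile("incl %0" : "+m"(counter));
  }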
@@ -927,6 +927,23 @@ bool CodeGenPrepare::OptimizeLoadStoreInst(Instruction *LdStInst, Value *Addr,
   return true;
 }
 
+/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
+/// processed uses a memory 'm' constraint.
+static bool
+hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
+                          const TargetLowering *TLI) {
+  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
+    InlineAsm::ConstraintInfo &CI = CInfos[i];
+    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
+      TargetLowering::ConstraintType CType = TLI->getConstraintType(CI.Codes[j]);
+      if (CType == TargetLowering::C_Memory)
+        return true;
+    }
+  }
+
+  return false;
+}
+
 /// OptimizeInlineAsmInst - If there are any memory operands, use
 /// OptimizeLoadStoreInt to sink their address computing into the block when
 /// possible / profitable.
@@ -963,7 +980,8 @@ bool CodeGenPrepare::OptimizeInlineAsmInst(Instruction *I, CallSite CS,
     }
 
     // Compute the constraint code and ConstraintType to use.
-    TLI->ComputeConstraintToUse(OpInfo, SDValue());
+    bool hasMemory = hasInlineAsmMemConstraint(ConstraintInfos, TLI);
+    TLI->ComputeConstraintToUse(OpInfo, SDValue(), hasMemory);
 
     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
         OpInfo.isIndirect) {
@@ -1132,4 +1150,3 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
 
   return MadeChange;
 }
-