use ExpandInlineAsm on TargetLowering instead of TargetAsmInfo.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@76442 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Chris Lattner, 2009-07-20 17:52:52 +00:00
Commit: 8850b36d0f (parent b810565152)


@@ -23,10 +23,8 @@
 #include "llvm/IntrinsicInst.h"
 #include "llvm/LLVMContext.h"
 #include "llvm/Pass.h"
-#include "llvm/Target/TargetAsmInfo.h"
 #include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetMachine.h"
 #include "llvm/Transforms/Utils/AddrModeMatcher.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
@@ -859,18 +857,16 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
     } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
       // If we found an inline asm expession, and if the target knows how to
       // lower it to normal LLVM code, do so now.
-      if (TLI && isa<InlineAsm>(CI->getCalledValue()))
-        if (const TargetAsmInfo *TAI =
-            TLI->getTargetMachine().getTargetAsmInfo()) {
-          if (TAI->ExpandInlineAsm(CI)) {
-            BBI = BB.begin();
-            // Avoid processing instructions out of order, which could cause
-            // reuse before a value is defined.
-            SunkAddrs.clear();
-          } else
-            // Sink address computing for memory operands into the block.
-            MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
-        }
+      if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
+        if (TLI->ExpandInlineAsm(CI)) {
+          BBI = BB.begin();
+          // Avoid processing instructions out of order, which could cause
+          // reuse before a value is defined.
+          SunkAddrs.clear();
+        } else
+          // Sink address computing for memory operands into the block.
+          MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
+      }
     }
   }
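
After this change, CodeGenPrepare asks the target itself whether an inline-asm call can be lowered to ordinary IR, via the TargetLowering::ExpandInlineAsm(CallInst*) hook, instead of going through TargetAsmInfo. The following is a minimal sketch of how a target might override that hook; the class name MyTargetLowering and the matched asm string are illustrative assumptions, not code from this commit. The contract mirrors the call site in the diff: return true only after rewriting and erasing the call (CodeGenPrepare then restarts its scan of the block), return false to leave the call untouched.

#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

// Hypothetical target lowering class; real targets derive from
// TargetLowering and override ExpandInlineAsm in the same way.
class MyTargetLowering : public TargetLowering {
public:
  explicit MyTargetLowering(TargetMachine &TM) : TargetLowering(TM) {}

  virtual bool ExpandInlineAsm(CallInst *CI) const {
    InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());

    // Handle one trivial pattern as an example: an identity move of a
    // single operand ("mov $1, $0" is an assumed, target-specific string).
    if (IA->getAsmString() != "mov $1, $0" || CI->getNumOperands() != 2)
      return false;  // unrecognized asm: leave the call in place

    // Replace the call with its sole argument and delete it; returning
    // true tells CodeGenPrepare the block was modified.
    CI->replaceAllUsesWith(CI->getOperand(1));
    CI->eraseFromParent();
    return true;
  }
};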