Inline asm instructions may have additional <imp-def,kill> register operands.
These operands are not marked with a flag like the normal asm operands, so we
must not assert that there is a flag.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@76373 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Jakob Stoklund Olesen
Date:   2009-07-19 19:09:59 +00:00
Parent: e687151e03
Commit: 45d34fe358

2 changed files with 17 additions and 2 deletions
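To make the situation concrete, here is a minimal, self-contained sketch of the operand layout the commit message describes. The types, names, and register numbers are hypothetical stand-ins, not the LLVM MachineInstr/MachineOperand API; the point is only that the flag-immediate/register groups built for the normal asm operands can be followed by bare implicit-def/kill registers that carry no flag.

```cpp
// Toy model of an inline asm instruction's operand list (hypothetical types).
#include <cstdint>
#include <vector>

struct ToyOperand {
  bool IsImm;       // true: a flag immediate, false: a register operand
  uint64_t Value;   // toy flag = number of registers it covers; or a reg id
};

// Operand 0 is the asm string; after it come [flag][regs...] groups.
// Extra <imp-def,kill> registers may be appended at the end with no flag.
std::vector<ToyOperand> makeToyInlineAsm() {
  return {
      {false, 0},              // slot 0: stands in for the asm string
      {true, 1}, {false, 10},  // flag covering one register, then that register
      {true, 1}, {false, 11},  // another flag/register group
      {false, 20}, {false, 10} // trailing imp-def/kill registers: no flags here
  };
}
```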


@@ -732,7 +732,9 @@ isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx) const {
     unsigned DefPart = 0;
     for (unsigned i = 1, e = getNumOperands(); i < e; ) {
       const MachineOperand &FMO = getOperand(i);
-      assert(FMO.isImm());
+      // After the normal asm operands there may be additional imp-def regs.
+      if (!FMO.isImm())
+        return false;
       // Skip over this def.
       unsigned NumOps = InlineAsm::getNumOperandRegisters(FMO.getImm());
       unsigned PrevDef = i + 1;
@@ -788,7 +790,9 @@ isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx) const {
     unsigned FlagIdx, NumOps=0;
     for (FlagIdx = 1; FlagIdx < UseOpIdx; FlagIdx += NumOps+1) {
       const MachineOperand &UFMO = getOperand(FlagIdx);
-      assert(UFMO.isImm() && "Expecting flag operand on inline asm");
+      // After the normal asm operands there may be additional imp-def regs.
+      if (!UFMO.isImm())
+        return false;
       NumOps = InlineAsm::getNumOperandRegisters(UFMO.getImm());
       assert(NumOps < getNumOperands() && "Invalid inline asm flag");
       if (UseOpIdx < FlagIdx+NumOps+1)
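Both hunks apply the same defensive pattern: walk the flag/register groups, and as soon as a non-immediate operand shows up where a flag was expected, conclude the walk has run into the appended imp-def registers and report "not tied" instead of asserting. Below is a self-contained sketch of that walk using the same toy operand model as above; the names are hypothetical, not the real getOperand/getNumOperandRegisters API.

```cpp
// Sketch of the defensive flag walk (toy types, not the LLVM API).
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct ToyOperand {
  bool IsImm;       // true: a flag immediate, false: a register operand
  uint64_t Value;   // toy flag = number of registers it covers; or a reg id
};

// Returns false once the walk leaves the flagged operands; the old code
// asserted IsImm here and died on the trailing imp-def registers.
bool walkFlagGroups(const std::vector<ToyOperand> &Ops) {
  for (size_t i = 1, e = Ops.size(); i < e;) {
    const ToyOperand &FMO = Ops[i];
    if (!FMO.IsImm)
      return false;                // reached the bare imp-def/kill tail
    size_t NumOps = FMO.Value;     // real code: InlineAsm::getNumOperandRegisters
    // ... the real code inspects Ops[i+1 .. i+NumOps] for tied operands here ...
    i += NumOps + 1;               // skip the flag and the registers it covers
  }
  return false;                    // nothing tied
}

int main() {
  std::vector<ToyOperand> Ops = {
      {false, 0},              // slot 0: stands in for the asm string
      {true, 1}, {false, 10},  // flag/register group
      {false, 20}, {false, 10} // appended imp-def/kill registers, no flags
  };
  std::printf("tied: %d\n", walkFlagGroups(Ops) ? 1 : 0); // prints 0, no assert
  return 0;
}
```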


@@ -0,0 +1,11 @@
+; RUN: llvm-as < %s | llc -march=x86-64
+; PR4583
+
+define i32 @atomic_cmpset_long(i64* %dst, i64 %exp, i64 %src) nounwind ssp noredzone noimplicitfloat {
+entry:
+  %0 = call i8 asm sideeffect "\09lock ; \09\09\09cmpxchgq $2,$1 ;\09 sete\09$0 ;\09\091:\09\09\09\09# atomic_cmpset_long", "={ax},=*m,r,{ax},*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* undef, i64 undef, i64 undef, i64* undef) nounwind ; <i8> [#uses=0]
+  br label %1
+
+; <label>:1 ; preds = %entry
+  ret i32 undef
+}