X86: remove temporary atomicrmw used during lowering.
We construct a temporary "atomicrmw xchg" instruction when lowering atomic
stores for widths that aren't supported natively. This isn't on the top-level
worklist though, so it won't be removed automatically and we have to do it
ourselves once that itself has been lowered.

Thanks Saleem for pointing this out!

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@212948 91177308-0d34-0410-b5e6-96231b3b80d8
commit 9e251d6d41
parent 52a51e197f
@@ -277,8 +277,11 @@ bool X86AtomicExpandPass::expandAtomicStore(StoreInst *SI) {
                                  SI->getValueOperand(), Order);
 
   // Now we have an appropriate swap instruction, lower it as usual.
-  if (shouldExpandAtomicRMW(AI))
-    return expandAtomicRMW(AI);
+  if (shouldExpandAtomicRMW(AI)) {
+    expandAtomicRMW(AI);
+    AI->eraseFromParent();
+    return true;
+  }
 
   return AI;
 }
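For context, here is a minimal sketch of how expandAtomicStore reads as a whole after this change. Only the lines inside the hunk above come from the commit; the rest (the Order variable, the IRBuilder setup that creates the temporary xchg, and the erase of the original store) is assumed from the surrounding code and the commit message, so treat it as an illustration rather than verbatim source.

// Sketch only: everything outside the hunk above is an assumption, not
// taken verbatim from this commit.
bool X86AtomicExpandPass::expandAtomicStore(StoreInst *SI) {
  // Lower an atomic store of an unsupported width by rewriting it as an
  // "atomicrmw xchg" whose result is ignored.
  AtomicOrdering Order = SI->getOrdering();   // assumed setup
  IRBuilder<> Builder(SI);
  AtomicRMWInst *AI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Xchg, SI->getPointerOperand(),
                              SI->getValueOperand(), Order);
  SI->eraseFromParent();

  // Now we have an appropriate swap instruction, lower it as usual. The xchg
  // was created behind the pass's back, so it never made it onto the
  // top-level worklist; once it has been expanded, erase it here ourselves.
  if (shouldExpandAtomicRMW(AI)) {
    expandAtomicRMW(AI);
    AI->eraseFromParent();
    return true;
  }

  return AI; // non-null pointer converts to true: the store was rewritten
}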
@@ -277,6 +277,7 @@ define void @atomic_store_seq_cst(i128* %p, i128 %in) {
 ; CHECK: lock
 ; CHECK: cmpxchg16b (%rdi)
 ; CHECK: jne [[LOOP]]
+; CHECK-NOT: callq ___sync_lock_test_and_set_16
 
   store atomic i128 %in, i128* %p seq_cst, align 16
   ret void