mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2025-04-01 03:33:42 +00:00
[SROA] Don't de-atomic volatile loads and stores
Volatile loads and stores are made visible in global state regardless of what memory is involved. It is not correct to disregard the ordering and synchronization scope because it is possible to synchronize with memory operations performed by hardware. This partially addresses PR23737.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@242126 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
6f1e023b46
commit
2a27389edc
@ -2593,13 +2593,21 @@ private:
|
||||
V = rewriteIntegerLoad(LI);
|
||||
} else if (NewBeginOffset == NewAllocaBeginOffset &&
|
||||
canConvertValue(DL, NewAllocaTy, LI.getType())) {
|
||||
V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), LI.isVolatile(),
|
||||
LI.getName());
|
||||
LoadInst *NewLI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
|
||||
LI.isVolatile(), LI.getName());
|
||||
if (LI.isVolatile())
|
||||
NewLI->setAtomic(LI.getOrdering(), LI.getSynchScope());
|
||||
|
||||
V = NewLI;
|
||||
} else {
|
||||
Type *LTy = TargetTy->getPointerTo();
|
||||
V = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy),
|
||||
getSliceAlign(TargetTy), LI.isVolatile(),
|
||||
LI.getName());
|
||||
LoadInst *NewLI = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy),
|
||||
getSliceAlign(TargetTy),
|
||||
LI.isVolatile(), LI.getName());
|
||||
if (LI.isVolatile())
|
||||
NewLI->setAtomic(LI.getOrdering(), LI.getSynchScope());
|
||||
|
||||
V = NewLI;
|
||||
IsPtrAdjusted = true;
|
||||
}
|
||||
V = convertValue(DL, IRB, V, TargetTy);
|
||||
@ -2722,7 +2730,8 @@ private:
|
||||
NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()),
|
||||
SI.isVolatile());
|
||||
}
|
||||
(void)NewSI;
|
||||
if (SI.isVolatile())
|
||||
NewSI->setAtomic(SI.getOrdering(), SI.getSynchScope());
|
||||
Pass.DeadInsts.insert(&SI);
|
||||
deleteIfTriviallyDead(OldOp);
|
||||
|
||||
|
@ -1595,3 +1595,14 @@ entry:
|
||||
store i32 %load, i32* %a.gep1
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @PR23737() {
|
||||
; CHECK-LABEL: @PR23737(
|
||||
; CHECK: store atomic volatile {{.*}} seq_cst
|
||||
; CHECK: load atomic volatile {{.*}} seq_cst
|
||||
entry:
|
||||
%ptr = alloca i64, align 8
|
||||
store atomic volatile i64 0, i64* %ptr seq_cst, align 8
|
||||
%load = load atomic volatile i64, i64* %ptr seq_cst, align 8
|
||||
ret void
|
||||
}
|
||||
|
Loading…
x
Reference in New Issue
Block a user