[SROA] Don't de-atomic volatile loads and stores

Volatile loads and stores are made visible in global state regardless of
what memory is involved.  It is not correct to discard their atomic
ordering and synchronization scope, because it is possible to synchronize
with memory operations performed by hardware.

This partially addresses PR23737.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@242126 91177308-0d34-0410-b5e6-96231b3b80d8
commit 2a27389edc
parent 6f1e023b46
Author: David Majnemer
Date:   2015-07-14 06:19:58 +00:00

2 changed files with 26 additions and 6 deletions
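
To make the bug concrete, here is IR in the shape of the new regression
test below, followed by a sketch of what SROA emitted before this fix
(hypothetical output; value names may differ):

  ; Input: atomic volatile accesses to a non-promotable alloca.
  define void @PR23737() {
  entry:
    %ptr = alloca i64, align 8
    store atomic volatile i64 0, i64* %ptr seq_cst, align 8
    %load = load atomic volatile i64, i64* %ptr seq_cst, align 8
    ret void
  }

  ; Pre-fix output (sketch): the rewritten accesses stayed volatile but
  ; lost the "atomic ... seq_cst" decoration:
  ;   store volatile i64 0, i64* %ptr, align 8
  ;   %load = load volatile i64, i64* %ptr, align 8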

--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp

@@ -2593,13 +2593,21 @@ private:
       V = rewriteIntegerLoad(LI);
     } else if (NewBeginOffset == NewAllocaBeginOffset &&
                canConvertValue(DL, NewAllocaTy, LI.getType())) {
-      V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), LI.isVolatile(),
-                                LI.getName());
+      LoadInst *NewLI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+                                              LI.isVolatile(), LI.getName());
+      if (LI.isVolatile())
+        NewLI->setAtomic(LI.getOrdering(), LI.getSynchScope());
+
+      V = NewLI;
     } else {
       Type *LTy = TargetTy->getPointerTo();
-      V = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy),
-                                getSliceAlign(TargetTy), LI.isVolatile(),
-                                LI.getName());
+      LoadInst *NewLI = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy),
+                                              getSliceAlign(TargetTy),
+                                              LI.isVolatile(), LI.getName());
+      if (LI.isVolatile())
+        NewLI->setAtomic(LI.getOrdering(), LI.getSynchScope());
+
+      V = NewLI;
       IsPtrAdjusted = true;
     }
     V = convertValue(DL, IRB, V, TargetTy);
@@ -2722,7 +2730,8 @@ private:
       NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()),
                                      SI.isVolatile());
     }
-    (void)NewSI;
+    if (SI.isVolatile())
+      NewSI->setAtomic(SI.getOrdering(), SI.getSynchScope());
     Pass.DeadInsts.insert(&SI);
     deleteIfTriviallyDead(OldOp);
 

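Both hunks share the same pattern: capture the newly created instruction
and, when the original access was volatile, copy its ordering and
synchronization scope onto the replacement. Note the guard is isVolatile()
rather than isAtomic(): for a plain volatile access, getOrdering() returns
NotAtomic, so re-applying it leaves the new instruction non-atomic. A small
IR sketch of both cases (hypothetical pointer name, assuming the access is
rewritten onto the new alloca):

  ; An atomic volatile store now keeps its decoration across the rewrite:
  store atomic volatile i64 0, i64* %new.ptr seq_cst, align 8

  ; A merely volatile store is re-emitted unchanged, since its ordering
  ; was NotAtomic to begin with:
  store volatile i64 0, i64* %new.ptr, align 8
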
--- a/test/Transforms/SROA/basictest.ll
+++ b/test/Transforms/SROA/basictest.ll

@@ -1595,3 +1595,14 @@ entry:
   store i32 %load, i32* %a.gep1
   ret void
 }
+
+define void @PR23737() {
+; CHECK-LABEL: @PR23737(
+; CHECK: store atomic volatile {{.*}} seq_cst
+; CHECK: load atomic volatile {{.*}} seq_cst
+entry:
+  %ptr = alloca i64, align 8
+  store atomic volatile i64 0, i64* %ptr seq_cst, align 8
+  %load = load atomic volatile i64, i64* %ptr seq_cst, align 8
+  ret void
+}