SLPVectorizer: Don't vectorize volatile memory operations
radar://15231682

Reapply r192799. http://lab.llvm.org:8011/builders/lldb-x86_64-debian-clang/builds/8226
showed that the bot is still broken even with this change out of the tree.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@192820 91177308-0d34-0410-b5e6-96231b3b80d8
parent 48320e0de7
commit fc1604ec72
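
For orientation (not part of the commit itself): both hunks below gate vectorization on isSimple(). In the LLVM API of this era, LoadInst::isSimple() and StoreInst::isSimple() return true only for accesses that are neither volatile nor atomic. A minimal sketch of that guard follows; the helper name isVectorizableMemOp is made up for illustration and does not appear in the patch.

    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // Illustrative helper, not part of the patch: mirrors the checks the
    // two hunks below add. isSimple() is true only for non-volatile,
    // non-atomic accesses, so this rejects exactly the memory operations
    // the commit excludes from vectorization.
    static bool isVectorizableMemOp(Instruction *I) {
      if (LoadInst *L = dyn_cast<LoadInst>(I))
        return L->isSimple();
      if (StoreInst *S = dyn_cast<StoreInst>(I))
        return S->isSimple();
      return false;
    }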
@@ -786,13 +786,14 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
     }
     case Instruction::Load: {
       // Check if the loads are consecutive or if we need to swizzle them.
-      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
-        if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
+      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
+        LoadInst *L = cast<LoadInst>(VL[i]);
+        if (!L->isSimple() || !isConsecutiveAccess(VL[i], VL[i + 1])) {
           newTreeEntry(VL, false);
           DEBUG(dbgs() << "SLP: Need to swizzle loads.\n");
           return;
         }
+      }
       newTreeEntry(VL, true);
       DEBUG(dbgs() << "SLP: added a vector of loads.\n");
       return;
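
For illustration, two loads that fail the new isSimple() check and would now take the gather bail-out path above (IR syntax matches this era's typed pointers; the values and names are made up):

    %v = load volatile double* %p, align 8        ; volatile: not simple
    %a = load atomic i32* %q seq_cst, align 4     ; atomic: not simple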
@@ -1911,6 +1912,10 @@ unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
     if (!SI)
       continue;

+    // Don't touch volatile stores.
+    if (!SI->isSimple())
+      continue;
+
     // Check that the pointer points to scalars.
     Type *Ty = SI->getValueOperand()->getType();
     if (Ty->isAggregateType() || Ty->isVectorTy())
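
Analogously, a store that fails isSimple() is now skipped at collection time, so it never seeds a vectorizable store chain. A made-up example:

    store volatile double %x, double* %p, align 8   ; skipped by collectStores
    store double %y, double* %q, align 8            ; still a candidate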
@@ -44,3 +44,46 @@ entry:
   store double %mul5, double* %arrayidx5, align 8
   ret void
 }
+
+; Don't vectorize volatile loads.
+; CHECK: test_volatile_load
+; CHECK-NOT: load <2 x double>
+; CHECK: store <2 x double>
+; CHECK: ret
+define void @test_volatile_load(double* %a, double* %b, double* %c) {
+entry:
+  %i0 = load volatile double* %a, align 8
+  %i1 = load volatile double* %b, align 8
+  %mul = fmul double %i0, %i1
+  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %i3 = load double* %arrayidx3, align 8
+  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %i4 = load double* %arrayidx4, align 8
+  %mul5 = fmul double %i3, %i4
+  store double %mul, double* %c, align 8
+  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  store double %mul5, double* %arrayidx5, align 8
+  ret void
+}
+
+; Don't vectorize volatile stores.
+; CHECK: test_volatile_store
+; CHECK-NOT: store <2 x double>
+; CHECK: ret
+define void @test_volatile_store(double* %a, double* %b, double* %c) {
+entry:
+  %i0 = load double* %a, align 8
+  %i1 = load double* %b, align 8
+  %mul = fmul double %i0, %i1
+  %arrayidx3 = getelementptr inbounds double* %a, i64 1
+  %i3 = load double* %arrayidx3, align 8
+  %arrayidx4 = getelementptr inbounds double* %b, i64 1
+  %i4 = load double* %arrayidx4, align 8
+  %mul5 = fmul double %i3, %i4
+  store volatile double %mul, double* %c, align 8
+  %arrayidx5 = getelementptr inbounds double* %c, i64 1
+  store volatile double %mul5, double* %arrayidx5, align 8
+  ret void
+}
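
The hunk does not show the test file's RUN line. A typical driver for FileCheck tests like these in a 2013-era SLP vectorizer test would look something like the following; the exact flags are an assumption, not taken from this patch:

    ; RUN: opt < %s -basicaa -slp-vectorizer -S | FileCheck %s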