ARM cost model: Unaligned vectorized double stores are expensive

Updated a test case that assumed that <2 x double> would vectorize to use <4 x float>.

radar://15338229

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@193574 91177308-0d34-0410-b5e6-96231b3b80d8

commit c04d241d13 (parent 7e8cebf22d)
@@ -129,6 +129,9 @@ public:
   unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                   OperandValueKind Op1Info = OK_AnyValue,
                                   OperandValueKind Op2Info = OK_AnyValue) const;
+
+  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+                           unsigned AddressSpace) const;
   /// @}
 };
@@ -540,3 +543,15 @@ unsigned ARMTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueK
   return Cost;
 }
+
+unsigned ARMTTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+                                 unsigned AddressSpace) const {
+  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
+
+  if (Src->isVectorTy() && Alignment != 16 &&
+      Src->getVectorElementType()->isDoubleTy()) {
+    // Unaligned loads/stores are extremely inefficient:
+    // we need 4 uops for vst.1/vld.1 vs. 1 uop for vldr/vstr.
+    return LT.first * 4;
+  }
+  return LT.first;
+}
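For illustration, here is a minimal standalone C++ model of the rule above. The names (modelMemoryOpCost, legalizedParts) and the flat per-part costs are assumptions for this sketch, not LLVM's API; the real hook works on IR types via TLI->getTypeLegalizationCost.

#include <cstdio>

// Sketch of the cost rule: vectors of double that are not 16-byte aligned
// pay a 4x penalty per legalized part; everything else costs one unit.
unsigned modelMemoryOpCost(bool isVectorOfDouble, unsigned alignment,
                           unsigned legalizedParts = 1) {
  if (isVectorOfDouble && alignment != 16)
    return legalizedParts * 4; // vst1/vld1 path: ~4 uops each on Swift
  return legalizedParts;       // vldr/vstr path: 1 uop
}

int main() {
  std::printf("unaligned <2 x double> store: %u\n", modelMemoryOpCost(true, 8));  // 4
  std::printf("16-byte aligned <2 x double>: %u\n", modelMemoryOpCost(true, 16)); // 1
  std::printf("scalar double store:          %u\n", modelMemoryOpCost(false, 8)); // 1
  return 0;
}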
@@ -3,27 +3,27 @@
 target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
 target triple = "thumbv7-apple-ios3.0.0"

-;CHECK:foo_F64
-;CHECK: <2 x double>
+;CHECK:foo_F32
+;CHECK: <4 x float>
 ;CHECK:ret
-define double @foo_F64(double* nocapture %A, i32 %n) nounwind uwtable readonly ssp {
+define float @foo_F32(float* nocapture %A, i32 %n) nounwind uwtable readonly ssp {
   %1 = icmp sgt i32 %n, 0
   br i1 %1, label %.lr.ph, label %._crit_edge

 .lr.ph:                                           ; preds = %0, %.lr.ph
   %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
-  %prod.01 = phi double [ %4, %.lr.ph ], [ 0.000000e+00, %0 ]
-  %2 = getelementptr inbounds double* %A, i64 %indvars.iv
-  %3 = load double* %2, align 8
-  %4 = fmul fast double %prod.01, %3
+  %prod.01 = phi float [ %4, %.lr.ph ], [ 0.000000e+00, %0 ]
+  %2 = getelementptr inbounds float* %A, i64 %indvars.iv
+  %3 = load float* %2, align 8
+  %4 = fmul fast float %prod.01, %3
   %indvars.iv.next = add i64 %indvars.iv, 1
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
   %exitcond = icmp eq i32 %lftr.wideiv, %n
   br i1 %exitcond, label %._crit_edge, label %.lr.ph

 ._crit_edge:                                      ; preds = %.lr.ph, %0
-  %prod.0.lcssa = phi double [ 0.000000e+00, %0 ], [ %4, %.lr.ph ]
-  ret double %prod.0.lcssa
+  %prod.0.lcssa = phi float [ 0.000000e+00, %0 ], [ %4, %.lr.ph ]
+  ret float %prod.0.lcssa
 }

 ;CHECK:foo_I8
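To see why the test switched element types, compare per-iteration memory costs under the new model (the numbers follow the 4x rule above and are illustrative, not queried from LLVM): an unaligned <2 x double> load costs 4 but replaces only two scalar loads, while a <4 x float> load is unpenalized and replaces four.

#include <cstdio>

int main() {
  // <2 x double> at align 8: penalized vector op vs. two 1-uop scalar loads.
  unsigned vecDouble = 4, scalarDouble = 2 * 1;
  // <4 x float> at align 8: not a vector of doubles, so no penalty.
  unsigned vecFloat = 1, scalarFloat = 4 * 1;
  std::printf("double: vector %u vs scalar %u -> stay scalar\n", vecDouble, scalarDouble);
  std::printf("float : vector %u vs scalar %u -> vectorize\n", vecFloat, scalarFloat);
  return 0;
}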
test/Transforms/SLPVectorizer/ARM/memory.ll (new file, 20 lines)
@@ -0,0 +1,20 @@
+; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=thumbv7-apple-ios3.0.0 -mcpu=swift | FileCheck %s
+
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
+
+; On Swift, unaligned <2 x double> stores need 4 uops, so it is cheaper
+; to do this scalar.
+
+; CHECK-LABEL: expensive_double_store
+; CHECK-NOT: load <2 x double>
+; CHECK-NOT: store <2 x double>
+define void @expensive_double_store(double* noalias %dst, double* noalias %src, i64 %count) {
+entry:
+  %0 = load double* %src, align 8
+  store double %0, double* %dst, align 8
+  %arrayidx2 = getelementptr inbounds double* %src, i64 1
+  %1 = load double* %arrayidx2, align 8
+  %arrayidx3 = getelementptr inbounds double* %dst, i64 1
+  store double %1, double* %arrayidx3, align 8
+  ret void
+}
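The comment's arithmetic, made explicit: for this function the SLP vectorizer would weigh one unaligned <2 x double> load plus one unaligned <2 x double> store (4 + 4 = 8 under the new model) against the four scalar operations it started with (2 + 2 = 4), so staying scalar wins, which is what the CHECK-NOT lines verify. A sketch of that comparison (costs are the model's assumptions, not queried from LLVM):

#include <cassert>

int main() {
  unsigned vectorTree = 4 /* unaligned <2 x double> load  */
                      + 4 /* unaligned <2 x double> store */;
  unsigned scalarTree = 2 * 1 /* two double loads  */
                      + 2 * 1 /* two double stores */;
  assert(scalarTree < vectorTree); // 4 < 8: SLP keeps the scalar code
  return 0;
}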