Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2024-07-08 18:30:04 +00:00.
TTI: Honour cost model for estimating cost of vector-intrinsic and calls.
Review: http://reviews.llvm.org/D8096 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@232528 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in: parent 3a660c933a, commit 6e046d3810.
@ -528,18 +528,29 @@ public:
|
||||
// Assume that we need to scalarize this intrinsic.
|
||||
unsigned ScalarizationCost = 0;
|
||||
unsigned ScalarCalls = 1;
|
||||
Type *ScalarRetTy = RetTy;
|
||||
if (RetTy->isVectorTy()) {
|
||||
ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
|
||||
ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
|
||||
ScalarRetTy = RetTy->getScalarType();
|
||||
}
|
||||
SmallVector<Type *, 4> ScalarTys;
|
||||
for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
|
||||
if (Tys[i]->isVectorTy()) {
|
||||
ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
|
||||
ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
|
||||
Type *Ty = Tys[i];
|
||||
if (Ty->isVectorTy()) {
|
||||
ScalarizationCost += getScalarizationOverhead(Ty, false, true);
|
||||
ScalarCalls = std::max(ScalarCalls, Ty->getVectorNumElements());
|
||||
Ty = Ty->getScalarType();
|
||||
}
|
||||
ScalarTys.push_back(Ty);
|
||||
}
|
||||
if (ScalarCalls == 1)
|
||||
return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
|
||||
|
||||
return ScalarCalls + ScalarizationCost;
|
||||
unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
|
||||
IID, ScalarRetTy, ScalarTys);
|
||||
|
||||
return ScalarCalls * ScalarCost + ScalarizationCost;
|
||||
}
|
||||
// Look for intrinsics that can be lowered directly or turned into a scalar
|
||||
// intrinsic call.
|
||||
@ -649,10 +660,25 @@ public:
|
||||
// this will emit a costly libcall, adding call overhead and spills. Make it
|
||||
// very expensive.
|
||||
if (RetTy->isVectorTy()) {
|
||||
unsigned Num = RetTy->getVectorNumElements();
|
||||
unsigned Cost = static_cast<T *>(this)->getIntrinsicInstrCost(
|
||||
IID, RetTy->getScalarType(), Tys);
|
||||
return 10 * Cost * Num;
|
||||
unsigned ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
|
||||
unsigned ScalarCalls = RetTy->getVectorNumElements();
|
||||
SmallVector<Type *, 4> ScalarTys;
|
||||
for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
|
||||
Type *Ty = Tys[i];
|
||||
if (Ty->isVectorTy())
|
||||
Ty = Ty->getScalarType();
|
||||
ScalarTys.push_back(Ty);
|
||||
}
|
||||
unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
|
||||
IID, RetTy->getScalarType(), ScalarTys);
|
||||
for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
|
||||
if (Tys[i]->isVectorTy()) {
|
||||
ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
|
||||
ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
|
||||
}
|
||||
}
|
||||
|
||||
return ScalarCalls * ScalarCost + ScalarizationCost;
|
||||
}
|
||||
|
||||
// This is going to be turned into a library call, make it expensive.
|
||||
|
@ -22,7 +22,7 @@ for.end: ; preds = %vector.body
|
||||
ret void
|
||||
|
||||
; CORE2: Printing analysis 'Cost Model Analysis' for function 'test1':
|
||||
; CORE2: Cost Model: Found an estimated cost of 400 for instruction: %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load)
|
||||
; CORE2: Cost Model: Found an estimated cost of 46 for instruction: %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load)
|
||||
|
||||
; COREI7: Printing analysis 'Cost Model Analysis' for function 'test1':
|
||||
; COREI7: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load)
|
||||
@ -50,7 +50,7 @@ for.end: ; preds = %vector.body
|
||||
ret void
|
||||
|
||||
; CORE2: Printing analysis 'Cost Model Analysis' for function 'test2':
|
||||
; CORE2: Cost Model: Found an estimated cost of 400 for instruction: %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.load)
|
||||
; CORE2: Cost Model: Found an estimated cost of 46 for instruction: %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.load)
|
||||
|
||||
; COREI7: Printing analysis 'Cost Model Analysis' for function 'test2':
|
||||
; COREI7: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.load)
|
||||
|
@ -15,9 +15,9 @@ target triple = "x86_64-apple-macosx10.8.0"
|
||||
; The source code for the test:
|
||||
;
|
||||
; #include <math.h>
|
||||
; void foo(float* restrict A, float * restrict B, int size)
|
||||
; void foo(float* restrict A, float * restrict B)
|
||||
; {
|
||||
; for (int i = 0; i < size; ++i) A[i] = sinf(B[i]);
|
||||
; for (int i = 0; i < 1000; i+=2) A[i] = sinf(B[i]);
|
||||
; }
|
||||
;
|
||||
|
||||
@ -25,24 +25,20 @@ target triple = "x86_64-apple-macosx10.8.0"
|
||||
; This loop will be vectorized, although the scalar cost is lower than any of vector costs, but vectorization is explicitly forced in metadata.
|
||||
;
|
||||
|
||||
define void @vectorized(float* noalias nocapture %A, float* noalias nocapture %B, i32 %size) {
|
||||
define void @vectorized(float* noalias nocapture %A, float* noalias nocapture %B) {
|
||||
entry:
|
||||
%cmp6 = icmp sgt i32 %size, 0
|
||||
br i1 %cmp6, label %for.body.preheader, label %for.end
|
||||
|
||||
for.body.preheader:
|
||||
br label %for.body
|
||||
|
||||
for.body:
|
||||
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
|
||||
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
|
||||
%arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
|
||||
%0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
|
||||
%call = tail call float @llvm.sin.f32(float %0)
|
||||
%arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
|
||||
store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
|
||||
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
|
||||
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
|
||||
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
|
||||
%exitcond = icmp eq i32 %lftr.wideiv, %size
|
||||
%exitcond = icmp eq i32 %lftr.wideiv, 1000
|
||||
br i1 %exitcond, label %for.end.loopexit, label %for.body, !llvm.loop !1
|
||||
|
||||
for.end.loopexit:
|
||||
@ -59,24 +55,20 @@ for.end:
|
||||
; This method will not be vectorized, as scalar cost is lower than any of vector costs.
|
||||
;
|
||||
|
||||
define void @not_vectorized(float* noalias nocapture %A, float* noalias nocapture %B, i32 %size) {
|
||||
define void @not_vectorized(float* noalias nocapture %A, float* noalias nocapture %B) {
|
||||
entry:
|
||||
%cmp6 = icmp sgt i32 %size, 0
|
||||
br i1 %cmp6, label %for.body.preheader, label %for.end
|
||||
|
||||
for.body.preheader:
|
||||
br label %for.body
|
||||
|
||||
for.body:
|
||||
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
|
||||
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
|
||||
%arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
|
||||
%0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
|
||||
%call = tail call float @llvm.sin.f32(float %0)
|
||||
%arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
|
||||
store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
|
||||
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
|
||||
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
|
||||
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
|
||||
%exitcond = icmp eq i32 %lftr.wideiv, %size
|
||||
%exitcond = icmp eq i32 %lftr.wideiv, 1000
|
||||
br i1 %exitcond, label %for.end.loopexit, label %for.body, !llvm.loop !3
|
||||
|
||||
for.end.loopexit:
|
||||
|
Loading…
Reference in New Issue
Block a user