X86 cost model: Vectorizing integer division is a bad idea
radar://14057959

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@184872 91177308-0d34-0410-b5e6-96231b3b80d8
@@ -196,6 +196,16 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
     { ISD::SRA, MVT::v32i8, 32*10 }, // Scalarized.
     { ISD::SRA, MVT::v16i16, 16*10 }, // Scalarized.
     { ISD::SRA, MVT::v4i64, 4*10 }, // Scalarized.
+
+    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
+    { ISD::SDIV, MVT::v32i8, 32*20 },
+    { ISD::SDIV, MVT::v16i16, 16*20 },
+    { ISD::SDIV, MVT::v8i32, 8*20 },
+    { ISD::SDIV, MVT::v4i64, 4*20 },
+    { ISD::UDIV, MVT::v32i8, 32*20 },
+    { ISD::UDIV, MVT::v16i16, 16*20 },
+    { ISD::UDIV, MVT::v8i32, 8*20 },
+    { ISD::UDIV, MVT::v4i64, 4*20 },
   };
 
   // Look for AVX2 lowering tricks.
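The AVX2 entries above record a flat cost per whole vector operation, precomputed as lane count times an assumed 20-cycle per-lane penalty. The standalone C++ sketch below illustrates that lookup idea; it is not LLVM's CostTable machinery, and CostEntry, AVX2Table, and lookupCost are made-up names for illustration.

// Illustrative sketch only -- not LLVM's real CostTable code. CostEntry,
// AVX2Table, and lookupCost are hypothetical names. The idea: each
// (opcode, legalized vector type) pair maps to a precomputed cost of
// lanes * 20, exactly the values added in the table above.
#include <cstdio>
#include <string>
#include <vector>

struct CostEntry {
  std::string Opcode;   // e.g. "sdiv" or "udiv"
  std::string VecType;  // e.g. "v32i8"
  unsigned Cost;        // flat cost for the whole vector operation
};

static const std::vector<CostEntry> AVX2Table = {
  {"sdiv", "v32i8", 32 * 20}, {"sdiv", "v16i16", 16 * 20},
  {"sdiv", "v8i32", 8 * 20},  {"sdiv", "v4i64", 4 * 20},
  {"udiv", "v32i8", 32 * 20}, {"udiv", "v16i16", 16 * 20},
  {"udiv", "v8i32", 8 * 20},  {"udiv", "v4i64", 4 * 20},
};

// Return the table entry's cost if one exists; otherwise assume the
// operation is cheap (cost 1), mirroring a "no special handling" default.
unsigned lookupCost(const std::string &Opcode, const std::string &VecType) {
  for (const CostEntry &E : AVX2Table)
    if (E.Opcode == Opcode && E.VecType == VecType)
      return E.Cost;
  return 1;
}

int main() {
  // 32 lanes * 20 = 640, the value the new AVX2 test expects for sdiv <32 x i8>.
  std::printf("sdiv v32i8 -> cost %u\n", lookupCost("sdiv", "v32i8"));
  return 0;
}

In the diff itself the key is an (ISD opcode, MVT) pair rather than strings, but the costs are the same literal lanes*20 products.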
@@ -258,6 +268,21 @@ unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
     { ISD::SRA, MVT::v8i16, 8*10 }, // Scalarized.
     { ISD::SRA, MVT::v4i32, 4*10 }, // Scalarized.
     { ISD::SRA, MVT::v2i64, 2*10 }, // Scalarized.
+
+    // It is not a good idea to vectorize division. We have to scalarize it and
+    // in the process we will often end up having to spill regular
+    // registers. The overhead of division is going to dominate most kernels
+    // anyways so try hard to prevent vectorization of division - it is
+    // generally a bad idea. Assume somewhat arbitrarily that we have to be able
+    // to hide "20 cycles" for each lane.
+    { ISD::SDIV, MVT::v16i8, 16*20 },
+    { ISD::SDIV, MVT::v8i16, 8*20 },
+    { ISD::SDIV, MVT::v4i32, 4*20 },
+    { ISD::SDIV, MVT::v2i64, 2*20 },
+    { ISD::UDIV, MVT::v16i8, 16*20 },
+    { ISD::UDIV, MVT::v8i16, 8*20 },
+    { ISD::UDIV, MVT::v4i32, 4*20 },
+    { ISD::UDIV, MVT::v2i64, 2*20 },
   };
 
   if (ST->hasSSE2()) {
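As a quick sanity check of the "hide 20 cycles for each lane" assumption spelled out in the comment above, every cost in these tables, and every value the new test below expects, is simply the number of lanes after legalization multiplied by 20. A minimal sketch, assuming nothing about LLVM's APIs (PerLaneDivCost and laneCost are hypothetical names):

// Minimal arithmetic check: cost = lanes * 20. PerLaneDivCost and laneCost
// are made-up names; the expected values are taken from the new test file.
#include <cassert>

constexpr unsigned PerLaneDivCost = 20;  // assumed per-lane penalty from the comment above

constexpr unsigned laneCost(unsigned Lanes) { return Lanes * PerLaneDivCost; }

int main() {
  assert(laneCost(16) == 320);  // sdiv <16 x i8>
  assert(laneCost(8) == 160);   // sdiv <8 x i16>
  assert(laneCost(4) == 80);    // sdiv <4 x i32>
  assert(laneCost(2) == 40);    // sdiv <2 x i32>: two lanes after legalization
  return 0;
}

The same arithmetic gives the AVX2 expectations (640, 320, 160, 80) for the wider vector types.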

test/Analysis/CostModel/X86/div.ll (new file, 32 lines)
@@ -0,0 +1,32 @@
+; RUN: opt -mtriple=x86_64-apple-darwin -mcpu=core2 -cost-model -analyze < %s | FileCheck --check-prefix=SSE2 %s
+; RUN: opt -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -cost-model -analyze < %s | FileCheck --check-prefix=AVX2 %s
+
+define void @div_sse() {
+  ; SSE2: div_sse
+  ; SSE2: cost of 320 {{.*}} sdiv
+  %a0 = sdiv <16 x i8> undef, undef
+  ; SSE2: cost of 160 {{.*}} sdiv
+  %a1 = sdiv <8 x i16> undef, undef
+  ; SSE2: cost of 80 {{.*}} sdiv
+  %a2 = sdiv <4 x i32> undef, undef
+  ; SSE2: cost of 40 {{.*}} sdiv
+  %a3 = sdiv <2 x i32> undef, undef
+  ret void
+}
+; SSE2: div_avx
+
+define void @div_avx() {
+  ; AVX2: div_avx
+  ; AVX2: cost of 640 {{.*}} sdiv
+  %a0 = sdiv <32 x i8> undef, undef
+  ; AVX2: cost of 320 {{.*}} sdiv
+  %a1 = sdiv <16 x i16> undef, undef
+  ; AVX2: cost of 160 {{.*}} sdiv
+  %a2 = sdiv <8 x i32> undef, undef
+  ; AVX2: cost of 80 {{.*}} sdiv
+  %a3 = sdiv <4 x i32> undef, undef
+  ret void
+}