ARM cost model: Fix costs for some vector selects

I was too pessimistic in r177105. Vector selects that fit into a legal register
type lower just fine. I was misled by the code fragment that I was using: the
stores and loads I saw in those cases came from lowering the select condition,
which was itself loaded from an address.

Changing the code fragment to:

%T0_3 = type <8 x i16>
%T1_3 = type <8 x i1>

define void @func_blend3(%T0_3* %loadaddr, %T0_3* %loadaddr2,
                         %T1_3* %blend, %T0_3* %storeaddr) {
  %v0 = load %T0_3* %loadaddr
  %v1 = load %T0_3* %loadaddr2
==> FROM:
  ;%c = load %T1_3* %blend
==> TO:
  %c = icmp slt %T0_3 %v0, %v1
==> USE:
  %r = select %T1_3 %c, %T0_3 %v0, %T0_3 %v1

  store %T0_3 %r, %T0_3* %storeaddr
  ret void
}

revealed this mistake.
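
For illustration, once the condition is produced by an in-register compare, a
select over a legal type needs no stack traffic at all. A minimal sketch (the
function below is only illustrative; the exact instructions picked may differ,
e.g. a single vmin.s16 or a vcgt.s16/vbsl pair):

; Illustrative only: an <8 x i16> "min" written as icmp + select.
; With the condition computed in registers this should lower to a single
; NEON compare-and-blend (or vmin.s16), not to the per-element stores and
; loads seen when %c is reloaded from memory.
define <8 x i16> @select_legal_type(<8 x i16> %v0, <8 x i16> %v1) {
  %c = icmp slt <8 x i16> %v0, %v1
  %r = select <8 x i1> %c, <8 x i16> %v0, <8 x i16> %v1
  ret <8 x i16> %r
}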

radar://13403975

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@177170 91177308-0d34-0410-b5e6-96231b3b80d8
Arnold Schwaighofer 2013-03-15 18:31:01 +00:00
parent bcbf3fddef
commit 5193e4ebe2
3 changed files with 15 additions and 116 deletions

@@ -351,11 +351,6 @@ unsigned ARMTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
   if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
     // Lowering of some vector selects is currently far from perfect.
     static const TypeConversionCostTblEntry<MVT> NEONVectorSelectTbl[] = {
-      { ISD::SELECT, MVT::v4i1, MVT::v4i8, 2*4 + 2*1 },
-      { ISD::SELECT, MVT::v8i1, MVT::v8i8, 2*8 + 1 },
-      { ISD::SELECT, MVT::v16i1, MVT::v16i8, 2*16 + 1 },
-      { ISD::SELECT, MVT::v4i1, MVT::v4i16, 2*4 + 1 },
-      { ISD::SELECT, MVT::v8i1, MVT::v8i16, 2*8 + 1 },
       { ISD::SELECT, MVT::v16i1, MVT::v16i16, 2*16 + 1 + 3*1 + 4*1 },
       { ISD::SELECT, MVT::v8i1, MVT::v8i32, 4*8 + 1*3 + 1*4 + 1*2 },
       { ISD::SELECT, MVT::v16i1, MVT::v16i32, 4*16 + 1*6 + 1*8 + 1*4 },
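
For reference, the entries that stay in the table still encode the poor
lowering: the v16i16 case works out to 2*16 + 1 + 3*1 + 4*1 = 40, which is
exactly the "cost of 40" the COST checks below continue to expect. Only the
entries whose types now lower cleanly are dropped.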

@@ -21,18 +21,18 @@ define void @casts() {
 ; Vector values
 ; CHECK: cost of 1 {{.*}} select
 %v7 = select <2 x i1> undef, <2 x i8> undef, <2 x i8> undef
-; CHECK: cost of 10 {{.*}} select
+; CHECK: cost of 1 {{.*}} select
 %v8 = select <4 x i1> undef, <4 x i8> undef, <4 x i8> undef
-; CHECK: cost of 17 {{.*}} select
+; CHECK: cost of 1 {{.*}} select
 %v9 = select <8 x i1> undef, <8 x i8> undef, <8 x i8> undef
-; CHECK: cost of 33 {{.*}} select
+; CHECK: cost of 1 {{.*}} select
 %v10 = select <16 x i1> undef, <16 x i8> undef, <16 x i8> undef
 ; CHECK: cost of 1 {{.*}} select
 %v11 = select <2 x i1> undef, <2 x i16> undef, <2 x i16> undef
-; CHECK: cost of 9 {{.*}} select
+; CHECK: cost of 1 {{.*}} select
 %v12 = select <4 x i1> undef, <4 x i16> undef, <4 x i16> undef
-; CHECK: cost of 17 {{.*}} select
+; CHECK: cost of 1 {{.*}} select
 %v13 = select <8 x i1> undef, <8 x i16> undef, <8 x i16> undef
 ; CHECK: cost of 40 {{.*}} select
 %v13b = select <16 x i1> undef, <16 x i16> undef, <16 x i16> undef

@@ -12,102 +12,6 @@ define void @vmax_v4i32(<4 x i32>* %m, <4 x i32> %a, <4 x i32> %b) {
 ; We adjusted the cost model of the following selects. When we improve code
 ; lowering we also need to adjust the cost.
 ; RUN: opt < %s -cost-model -analyze -mtriple=thumbv7-apple-ios6.0.0 -march=arm -mcpu=cortex-a8 | FileCheck %s --check-prefix=COST
-%T0_3 = type <4 x i8>
-%T1_3 = type <4 x i1>
-; CHECK: func_blend3:
-define void @func_blend3(%T0_3* %loadaddr, %T0_3* %loadaddr2,
-%T1_3* %blend, %T0_3* %storeaddr) {
-; CHECK: strh
-; CHECK: strh
-; CHECK: strh
-; CHECK: strh
-; CHECK: vldr
-%v0 = load %T0_3* %loadaddr
-%v1 = load %T0_3* %loadaddr2
-%c = load %T1_3* %blend
-; COST: func_blend3
-; COST: cost of 10 {{.*}} select
-%r = select %T1_3 %c, %T0_3 %v0, %T0_3 %v1
-store %T0_3 %r, %T0_3* %storeaddr
-ret void
-}
-%T0_4 = type <8 x i8>
-%T1_4 = type <8 x i1>
-; CHECK: func_blend4:
-define void @func_blend4(%T0_4* %loadaddr, %T0_4* %loadaddr2,
-%T1_4* %blend, %T0_4* %storeaddr) {
-%v0 = load %T0_4* %loadaddr
-%v1 = load %T0_4* %loadaddr2
-%c = load %T1_4* %blend
-; check: strb
-; check: strb
-; check: strb
-; check: strb
-; check: vldr
-; COST: func_blend4
-; COST: cost of 17 {{.*}} select
-%r = select %T1_4 %c, %T0_4 %v0, %T0_4 %v1
-store %T0_4 %r, %T0_4* %storeaddr
-ret void
-}
-%T0_5 = type <16 x i8>
-%T1_5 = type <16 x i1>
-; CHECK: func_blend5:
-define void @func_blend5(%T0_5* %loadaddr, %T0_5* %loadaddr2,
-%T1_5* %blend, %T0_5* %storeaddr) {
-%v0 = load %T0_5* %loadaddr
-%v1 = load %T0_5* %loadaddr2
-%c = load %T1_5* %blend
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: vld
-; COST: func_blend5
-; COST: cost of 33 {{.*}} select
-%r = select %T1_5 %c, %T0_5 %v0, %T0_5 %v1
-store %T0_5 %r, %T0_5* %storeaddr
-ret void
-}
-%T0_8 = type <4 x i16>
-%T1_8 = type <4 x i1>
-; CHECK: func_blend8:
-define void @func_blend8(%T0_8* %loadaddr, %T0_8* %loadaddr2,
-%T1_8* %blend, %T0_8* %storeaddr) {
-%v0 = load %T0_8* %loadaddr
-%v1 = load %T0_8* %loadaddr2
-%c = load %T1_8* %blend
-; CHECK: strh
-; CHECK: strh
-; CHECK: strh
-; CHECK: strh
-; CHECK: vld
-; COST: func_blend8
-; COST: cost of 9 {{.*}} select
-%r = select %T1_8 %c, %T0_8 %v0, %T0_8 %v1
-store %T0_8 %r, %T0_8* %storeaddr
-ret void
-}
-%T0_9 = type <8 x i16>
-%T1_9 = type <8 x i1>
-; CHECK: func_blend9:
-define void @func_blend9(%T0_9* %loadaddr, %T0_9* %loadaddr2,
-%T1_9* %blend, %T0_9* %storeaddr) {
-%v0 = load %T0_9* %loadaddr
-%v1 = load %T0_9* %loadaddr2
-%c = load %T1_9* %blend
-; CHECK: strh
-; CHECK: strh
-; CHECK: strh
-; CHECK: strh
-; CHECK: vld
-; COST: func_blend9
-; COST: cost of 17 {{.*}} select
-%r = select %T1_9 %c, %T0_9 %v0, %T0_9 %v1
-store %T0_9 %r, %T0_9* %storeaddr
-ret void
-}
 %T0_10 = type <16 x i16>
 %T1_10 = type <16 x i1>
 ; CHECK: func_blend10:
@@ -115,11 +19,11 @@ define void @func_blend10(%T0_10* %loadaddr, %T0_10* %loadaddr2,
 %T1_10* %blend, %T0_10* %storeaddr) {
 %v0 = load %T0_10* %loadaddr
 %v1 = load %T0_10* %loadaddr2
-%c = load %T1_10* %blend
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
-; CHECK: strb
+%c = icmp slt %T0_10 %v0, %v1
+; CHECK: vst1
+; CHECK: vst1
+; CHECK: vst1
+; CHECK: vst1
 ; CHECK: vld
 ; COST: func_blend10
 ; COST: cost of 40 {{.*}} select
@@ -134,7 +38,7 @@ define void @func_blend14(%T0_14* %loadaddr, %T0_14* %loadaddr2,
 %T1_14* %blend, %T0_14* %storeaddr) {
 %v0 = load %T0_14* %loadaddr
 %v1 = load %T0_14* %loadaddr2
-%c = load %T1_14* %blend
+%c = icmp slt %T0_14 %v0, %v1
 ; CHECK: strb
 ; CHECK: strb
 ; CHECK: strb
@@ -152,7 +56,7 @@ define void @func_blend15(%T0_15* %loadaddr, %T0_15* %loadaddr2,
 %T1_15* %blend, %T0_15* %storeaddr) {
 %v0 = load %T0_15* %loadaddr
 %v1 = load %T0_15* %loadaddr2
-%c = load %T1_15* %blend
+%c = icmp slt %T0_15 %v0, %v1
 ; CHECK: strb
 ; CHECK: strb
 ; CHECK: strb
@@ -170,7 +74,7 @@ define void @func_blend18(%T0_18* %loadaddr, %T0_18* %loadaddr2,
 %T1_18* %blend, %T0_18* %storeaddr) {
 %v0 = load %T0_18* %loadaddr
 %v1 = load %T0_18* %loadaddr2
-%c = load %T1_18* %blend
+%c = icmp slt %T0_18 %v0, %v1
 ; CHECK: strh
 ; CHECK: strh
 ; CHECK: strh
@@ -188,7 +92,7 @@ define void @func_blend19(%T0_19* %loadaddr, %T0_19* %loadaddr2,
 %T1_19* %blend, %T0_19* %storeaddr) {
 %v0 = load %T0_19* %loadaddr
 %v1 = load %T0_19* %loadaddr2
-%c = load %T1_19* %blend
+%c = icmp slt %T0_19 %v0, %v1
 ; CHECK: strb
 ; CHECK: strb
 ; CHECK: strb
@@ -206,7 +110,7 @@ define void @func_blend20(%T0_20* %loadaddr, %T0_20* %loadaddr2,
 %T1_20* %blend, %T0_20* %storeaddr) {
 %v0 = load %T0_20* %loadaddr
 %v1 = load %T0_20* %loadaddr2
-%c = load %T1_20* %blend
+%c = icmp slt %T0_20 %v0, %v1
 ; CHECK: strb
 ; CHECK: strb
 ; CHECK: strb