From 3281412d2a5b00dc23d2da0ecd9c0da6d326a7aa Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Sat, 7 Feb 2015 16:14:55 +0000
Subject: [PATCH] [X86] Force fp stack folding tests to keep to specific domain.

General boolean instructions (AND, ANDN, OR, XOR) need to use a specific
domain instruction (and not just the default).

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@228495 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/stack-folding-fp-avx1.ll  | 33 ++++++++++++++++------
 test/CodeGen/X86/stack-folding-fp-sse42.ll | 16 ++++++++---
 2 files changed, 36 insertions(+), 13 deletions(-)

diff --git a/test/CodeGen/X86/stack-folding-fp-avx1.ll b/test/CodeGen/X86/stack-folding-fp-avx1.ll
index 26bd39189b3..18cd4179de8 100644
--- a/test/CodeGen/X86/stack-folding-fp-avx1.ll
+++ b/test/CodeGen/X86/stack-folding-fp-avx1.ll
@@ -147,7 +147,9 @@ define <4 x float> @stack_fold_andnps(<4 x float> %a0, <4 x float> %a1) {
   %4 = xor <2 x i64> %2, <i64 -1, i64 -1>
   %5 = and <2 x i64> %4, %3
   %6 = bitcast <2 x i64> %5 to <4 x float>
-  ret <4 x float> %6
+  ; fadd forces execution domain
+  %7 = fadd <4 x float> %6, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %7
 }
 
 define <8 x float> @stack_fold_andnps_ymm(<8 x float> %a0, <8 x float> %a1) {
@@ -159,7 +161,9 @@ define <8 x float> @stack_fold_andnps_ymm(<8 x float> %a0, <8 x float> %a1) {
   %4 = xor <4 x i64> %2, <i64 -1, i64 -1, i64 -1, i64 -1>
   %5 = and <4 x i64> %4, %3
   %6 = bitcast <4 x i64> %5 to <8 x float>
-  ret <8 x float> %6
+  ; fadd forces execution domain
+  %7 = fadd <8 x float> %6, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <8 x float> %7
 }
 
 define <2 x double> @stack_fold_andpd(<2 x double> %a0, <2 x double> %a1) {
@@ -196,7 +200,9 @@ define <4 x float> @stack_fold_andps(<4 x float> %a0, <4 x float> %a1) {
   %3 = bitcast <4 x float> %a1 to <2 x i64>
   %4 = and <2 x i64> %2, %3
   %5 = bitcast <2 x i64> %4 to <4 x float>
-  ret <4 x float> %5
+  ; fadd forces execution domain
+  %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %6
 }
 
 define <8 x float> @stack_fold_andps_ymm(<8 x float> %a0, <8 x float> %a1) {
@@ -207,7 +213,9 @@ define <8 x float> @stack_fold_andps_ymm(<8 x float> %a0, <8 x float> %a1) {
   %3 = bitcast <8 x float> %a1 to <4 x i64>
   %4 = and <4 x i64> %2, %3
   %5 = bitcast <4 x i64> %4 to <8 x float>
-  ret <8 x float> %5
+  ; fadd forces execution domain
+  %6 = fadd <8 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <8 x float> %6
 }
 
 define <2 x double> @stack_fold_blendpd(<2 x double> %a0, <2 x double> %a1) {
@@ -843,7 +851,6 @@ declare <8 x float> @llvm.x86.avx.dp.ps.256(<8 x float>, <8 x float>, i8) nounwi
 define <4 x float> @stack_fold_extractf128(<8 x float> %a0, <8 x float> %a1) {
   ;CHECK-LABEL: stack_fold_extractf128
   ;CHECK: vextractf128 $1, {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spill
-  ;CHECK: vmovaps {{-?[0-9]*}}(%rsp), %xmm0 {{.*#+}} 16-byte Reload
   %1 = shufflevector <8 x float> %a0, <8 x float> %a1, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %2 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
   ret <4 x float> %1
@@ -1239,7 +1246,9 @@ define <4 x float> @stack_fold_orps(<4 x float> %a0, <4 x float> %a1) {
   %3 = bitcast <4 x float> %a1 to <2 x i64>
   %4 = or <2 x i64> %2, %3
   %5 = bitcast <2 x i64> %4 to <4 x float>
-  ret <4 x float> %5
+  ; fadd forces execution domain
+  %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %6
 }
 
 define <8 x float> @stack_fold_orps_ymm(<8 x float> %a0, <8 x float> %a1) {
@@ -1250,7 +1259,9 @@ define <8 x float> @stack_fold_orps_ymm(<8 x float> %a0, <8 x float> %a1) {
   %3 = bitcast <8 x float> %a1 to <4 x i64>
   %4 = or <4 x i64> %2, %3
   %5 = bitcast <4 x i64> %4 to <8 x float>
-  ret <8 x float> %5
+  ; fadd forces execution domain
+  %6 = fadd <8 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <8 x float> %6
 }
 
 define <8 x float> @stack_fold_perm2f128(<8 x float> %a0, <8 x float> %a1) {
@@ -1781,7 +1792,9 @@ define <4 x float> @stack_fold_xorps(<4 x float> %a0, <4 x float> %a1) {
   %3 = bitcast <4 x float> %a1 to <2 x i64>
   %4 = xor <2 x i64> %2, %3
   %5 = bitcast <2 x i64> %4 to <4 x float>
-  ret <4 x float> %5
+  ; fadd forces execution domain
+  %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %6
 }
 
 define <8 x float> @stack_fold_xorps_ymm(<8 x float> %a0, <8 x float> %a1) {
@@ -1792,5 +1805,7 @@ define <8 x float> @stack_fold_xorps_ymm(<8 x float> %a0, <8 x float> %a1) {
   %3 = bitcast <8 x float> %a1 to <4 x i64>
   %4 = xor <4 x i64> %2, %3
   %5 = bitcast <4 x i64> %4 to <8 x float>
-  ret <8 x float> %5
+  ; fadd forces execution domain
+  %6 = fadd <8 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <8 x float> %6
 }
diff --git a/test/CodeGen/X86/stack-folding-fp-sse42.ll b/test/CodeGen/X86/stack-folding-fp-sse42.ll
index 2beccb18ff7..c26cc9df989 100644
--- a/test/CodeGen/X86/stack-folding-fp-sse42.ll
+++ b/test/CodeGen/X86/stack-folding-fp-sse42.ll
@@ -99,7 +99,9 @@ define <4 x float> @stack_fold_andnps(<4 x float> %a0, <4 x float> %a1) {
   %4 = xor <2 x i64> %2, <i64 -1, i64 -1>
   %5 = and <2 x i64> %4, %3
   %6 = bitcast <2 x i64> %5 to <4 x float>
-  ret <4 x float> %6
+  ; fadd forces execution domain
+  %7 = fadd <4 x float> %6, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %7
 }
 
 define <2 x double> @stack_fold_andpd(<2 x double> %a0, <2 x double> %a1) {
@@ -123,7 +125,9 @@ define <4 x float> @stack_fold_andps(<4 x float> %a0, <4 x float> %a1) {
   %3 = bitcast <4 x float> %a1 to <2 x i64>
   %4 = and <2 x i64> %2, %3
   %5 = bitcast <2 x i64> %4 to <4 x float>
-  ret <4 x float> %5
+  ; fadd forces execution domain
+  %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %6
 }
 
 define <2 x double> @stack_fold_blendpd(<2 x double> %a0, <2 x double> %a1) {
@@ -837,7 +841,9 @@ define <4 x float> @stack_fold_orps(<4 x float> %a0, <4 x float> %a1) {
   %3 = bitcast <4 x float> %a1 to <2 x i64>
   %4 = or <2 x i64> %2, %3
   %5 = bitcast <2 x i64> %4 to <4 x float>
-  ret <4 x float> %5
+  ; fadd forces execution domain
+  %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %6
 }
 
 ; TODO stack_fold_rcpps
@@ -1077,5 +1083,7 @@ define <4 x float> @stack_fold_xorps(<4 x float> %a0, <4 x float> %a1) {
   %3 = bitcast <4 x float> %a1 to <2 x i64>
   %4 = xor <2 x i64> %2, %3
   %5 = bitcast <2 x i64> %4 to <4 x float>
-  ret <4 x float> %5
+  ; fadd forces execution domain
+  %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %6
}
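
Note (not part of the patch): the pattern the commit message describes can be exercised in a standalone IR sketch. The function name below and the zero constant are illustrative only; the assumption, per the commit message, is that a bitwise op written on integer vectors would otherwise default to the integer domain (vpand/vpor/vpxor), while a trailing fadd user keeps the selected instruction in the single-precision domain (vandps/vorps/vxorps).

; Illustrative sketch, assuming an AVX target such as
; llc -mtriple=x86_64-unknown-unknown -mattr=+avx
define <4 x float> @force_ps_domain_and(<4 x float> %a0, <4 x float> %a1) {
  %1 = bitcast <4 x float> %a0 to <2 x i64>
  %2 = bitcast <4 x float> %a1 to <2 x i64>
  ; the bitwise op itself is integer-typed, so on its own it would be free
  ; to stay in the integer domain
  %3 = and <2 x i64> %1, %2
  %4 = bitcast <2 x i64> %3 to <4 x float>
  ; fadd with zero forces the single-precision execution domain without
  ; changing the result
  %5 = fadd <4 x float> %4, <float 0.0, float 0.0, float 0.0, float 0.0>
  ret <4 x float> %5
}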