From d4d01154ef0cb9d84d1c1c342ec9d3115d932e99 Mon Sep 17 00:00:00 2001
From: Dan Gohman
Date: Mon, 3 May 2010 22:36:46 +0000
Subject: [PATCH] Fix tests to use fadd, fsub, and fmul, instead of add, sub,
 and mul, when the type is floating-point.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@102969 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/ARM/vadd.ll                    |  4 ++--
 test/CodeGen/ARM/vld2.ll                    |  4 ++--
 test/CodeGen/ARM/vld3.ll                    |  4 ++--
 test/CodeGen/ARM/vld4.ll                    |  4 ++--
 test/CodeGen/ARM/vldlane.ll                 | 24 ++++++++++++------------
 test/CodeGen/ARM/vmla.ll                    |  8 ++++----
 test/CodeGen/ARM/vmls.ll                    |  8 ++++----
 test/CodeGen/ARM/vmul.ll                    |  4 ++--
 test/CodeGen/ARM/vneg.ll                    |  4 ++--
 test/CodeGen/ARM/vsub.ll                    |  4 ++--
 test/CodeGen/ARM/vtrn.ll                    |  4 ++--
 test/CodeGen/ARM/vuzp.ll                    |  2 +-
 test/CodeGen/ARM/vzip.ll                    |  2 +-
 test/CodeGen/X86/2010-01-07-UAMemFeature.ll |  2 +-
 test/CodeGen/X86/stack-align.ll             |  2 +-
 15 files changed, 40 insertions(+), 40 deletions(-)

diff --git a/test/CodeGen/ARM/vadd.ll b/test/CodeGen/ARM/vadd.ll
index 9fa530750aa..9bb8bf56104 100644
--- a/test/CodeGen/ARM/vadd.ll
+++ b/test/CodeGen/ARM/vadd.ll
@@ -41,7 +41,7 @@ define <2 x float> @vaddf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK: vadd.f32
 	%tmp1 = load <2 x float>* %A
 	%tmp2 = load <2 x float>* %B
-	%tmp3 = add <2 x float> %tmp1, %tmp2
+	%tmp3 = fadd <2 x float> %tmp1, %tmp2
 	ret <2 x float> %tmp3
 }
 
@@ -86,7 +86,7 @@ define <4 x float> @vaddQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK: vadd.f32
 	%tmp1 = load <4 x float>* %A
 	%tmp2 = load <4 x float>* %B
-	%tmp3 = add <4 x float> %tmp1, %tmp2
+	%tmp3 = fadd <4 x float> %tmp1, %tmp2
 	ret <4 x float> %tmp3
 }
 
diff --git a/test/CodeGen/ARM/vld2.ll b/test/CodeGen/ARM/vld2.ll
index f5dc06cb302..0838636ce74 100644
--- a/test/CodeGen/ARM/vld2.ll
+++ b/test/CodeGen/ARM/vld2.ll
@@ -50,7 +50,7 @@ define <2 x float> @vld2f(float* %A) nounwind {
 	%tmp1 = call %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2.v2f32(i8* %tmp0)
 	%tmp2 = extractvalue %struct.__neon_float32x2x2_t %tmp1, 0
 	%tmp3 = extractvalue %struct.__neon_float32x2x2_t %tmp1, 1
-	%tmp4 = add <2 x float> %tmp2, %tmp3
+	%tmp4 = fadd <2 x float> %tmp2, %tmp3
 	ret <2 x float> %tmp4
 }
 
@@ -104,7 +104,7 @@ define <4 x float> @vld2Qf(float* %A) nounwind {
 	%tmp1 = call %struct.__neon_float32x4x2_t @llvm.arm.neon.vld2.v4f32(i8* %tmp0)
 	%tmp2 = extractvalue %struct.__neon_float32x4x2_t %tmp1, 0
 	%tmp3 = extractvalue %struct.__neon_float32x4x2_t %tmp1, 1
-	%tmp4 = add <4 x float> %tmp2, %tmp3
+	%tmp4 = fadd <4 x float> %tmp2, %tmp3
 	ret <4 x float> %tmp4
 }
 
diff --git a/test/CodeGen/ARM/vld3.ll b/test/CodeGen/ARM/vld3.ll
index 33c4d37b3ed..65a24486bc6 100644
--- a/test/CodeGen/ARM/vld3.ll
+++ b/test/CodeGen/ARM/vld3.ll
@@ -50,7 +50,7 @@ define <2 x float> @vld3f(float* %A) nounwind {
 	%tmp1 = call %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3.v2f32(i8* %tmp0)
 	%tmp2 = extractvalue %struct.__neon_float32x2x3_t %tmp1, 0
 	%tmp3 = extractvalue %struct.__neon_float32x2x3_t %tmp1, 2
-	%tmp4 = add <2 x float> %tmp2, %tmp3
+	%tmp4 = fadd <2 x float> %tmp2, %tmp3
 	ret <2 x float> %tmp4
 }
 
@@ -108,7 +108,7 @@ define <4 x float> @vld3Qf(float* %A) nounwind {
 	%tmp1 = call %struct.__neon_float32x4x3_t @llvm.arm.neon.vld3.v4f32(i8* %tmp0)
 	%tmp2 = extractvalue %struct.__neon_float32x4x3_t %tmp1, 0
 	%tmp3 = extractvalue %struct.__neon_float32x4x3_t %tmp1, 2
-	%tmp4 = add <4 x float> %tmp2, %tmp3
+	%tmp4 = fadd <4 x float> %tmp2, %tmp3
 	ret <4 x float> %tmp4
 }
 
diff --git a/test/CodeGen/ARM/vld4.ll b/test/CodeGen/ARM/vld4.ll
index e800cb539aa..e0b870638a1 100644
--- a/test/CodeGen/ARM/vld4.ll
+++ b/test/CodeGen/ARM/vld4.ll
@@ -50,7 +50,7 @@ define <2 x float> @vld4f(float* %A) nounwind {
 	%tmp1 = call %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4.v2f32(i8* %tmp0)
 	%tmp2 = extractvalue %struct.__neon_float32x2x4_t %tmp1, 0
 	%tmp3 = extractvalue %struct.__neon_float32x2x4_t %tmp1, 2
-	%tmp4 = add <2 x float> %tmp2, %tmp3
+	%tmp4 = fadd <2 x float> %tmp2, %tmp3
 	ret <2 x float> %tmp4
 }
 
@@ -108,7 +108,7 @@ define <4 x float> @vld4Qf(float* %A) nounwind {
 	%tmp1 = call %struct.__neon_float32x4x4_t @llvm.arm.neon.vld4.v4f32(i8* %tmp0)
 	%tmp2 = extractvalue %struct.__neon_float32x4x4_t %tmp1, 0
 	%tmp3 = extractvalue %struct.__neon_float32x4x4_t %tmp1, 2
-	%tmp4 = add <4 x float> %tmp2, %tmp3
+	%tmp4 = fadd <4 x float> %tmp2, %tmp3
 	ret <4 x float> %tmp4
 }
 
diff --git a/test/CodeGen/ARM/vldlane.ll b/test/CodeGen/ARM/vldlane.ll
index 46a200220f6..b32c59019f4 100644
--- a/test/CodeGen/ARM/vldlane.ll
+++ b/test/CodeGen/ARM/vldlane.ll
@@ -52,7 +52,7 @@ define <2 x float> @vld2lanef(float* %A, <2 x float>* %B) nounwind {
 	%tmp2 = call %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2lane.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
 	%tmp3 = extractvalue %struct.__neon_float32x2x2_t %tmp2, 0
 	%tmp4 = extractvalue %struct.__neon_float32x2x2_t %tmp2, 1
-	%tmp5 = add <2 x float> %tmp3, %tmp4
+	%tmp5 = fadd <2 x float> %tmp3, %tmp4
 	ret <2 x float> %tmp5
 }
 
@@ -88,7 +88,7 @@ define <4 x float> @vld2laneQf(float* %A, <4 x float>* %B) nounwind {
 	%tmp2 = call %struct.__neon_float32x4x2_t @llvm.arm.neon.vld2lane.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
 	%tmp3 = extractvalue %struct.__neon_float32x4x2_t %tmp2, 0
 	%tmp4 = extractvalue %struct.__neon_float32x4x2_t %tmp2, 1
-	%tmp5 = add <4 x float> %tmp3, %tmp4
+	%tmp5 = fadd <4 x float> %tmp3, %tmp4
 	ret <4 x float> %tmp5
 }
 
@@ -160,8 +160,8 @@ define <2 x float> @vld3lanef(float* %A, <2 x float>* %B) nounwind {
 	%tmp3 = extractvalue %struct.__neon_float32x2x3_t %tmp2, 0
 	%tmp4 = extractvalue %struct.__neon_float32x2x3_t %tmp2, 1
 	%tmp5 = extractvalue %struct.__neon_float32x2x3_t %tmp2, 2
-	%tmp6 = add <2 x float> %tmp3, %tmp4
-	%tmp7 = add <2 x float> %tmp5, %tmp6
+	%tmp6 = fadd <2 x float> %tmp3, %tmp4
+	%tmp7 = fadd <2 x float> %tmp5, %tmp6
 	ret <2 x float> %tmp7
 }
 
@@ -202,8 +202,8 @@ define <4 x float> @vld3laneQf(float* %A, <4 x float>* %B) nounwind {
 	%tmp3 = extractvalue %struct.__neon_float32x4x3_t %tmp2, 0
 	%tmp4 = extractvalue %struct.__neon_float32x4x3_t %tmp2, 1
 	%tmp5 = extractvalue %struct.__neon_float32x4x3_t %tmp2, 2
-	%tmp6 = add <4 x float> %tmp3, %tmp4
-	%tmp7 = add <4 x float> %tmp5, %tmp6
+	%tmp6 = fadd <4 x float> %tmp3, %tmp4
+	%tmp7 = fadd <4 x float> %tmp5, %tmp6
 	ret <4 x float> %tmp7
 }
 
@@ -282,9 +282,9 @@ define <2 x float> @vld4lanef(float* %A, <2 x float>* %B) nounwind {
 	%tmp4 = extractvalue %struct.__neon_float32x2x4_t %tmp2, 1
 	%tmp5 = extractvalue %struct.__neon_float32x2x4_t %tmp2, 2
 	%tmp6 = extractvalue %struct.__neon_float32x2x4_t %tmp2, 3
-	%tmp7 = add <2 x float> %tmp3, %tmp4
-	%tmp8 = add <2 x float> %tmp5, %tmp6
-	%tmp9 = add <2 x float> %tmp7, %tmp8
+	%tmp7 = fadd <2 x float> %tmp3, %tmp4
+	%tmp8 = fadd <2 x float> %tmp5, %tmp6
+	%tmp9 = fadd <2 x float> %tmp7, %tmp8
 	ret <2 x float> %tmp9
 }
 
@@ -330,9 +330,9 @@ define <4 x float> @vld4laneQf(float* %A, <4 x float>* %B) nounwind {
 	%tmp4 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 1
 	%tmp5 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 2
 	%tmp6 = extractvalue %struct.__neon_float32x4x4_t %tmp2, 3
-	%tmp7 = add <4 x float> %tmp3, %tmp4
-	%tmp8 = add <4 x float> %tmp5, %tmp6
-	%tmp9 = add <4 x float> %tmp7, %tmp8
+	%tmp7 = fadd <4 x float> %tmp3, %tmp4
+	%tmp8 = fadd <4 x float> %tmp5, %tmp6
+	%tmp9 = fadd <4 x float> %tmp7, %tmp8
 	ret <4 x float> %tmp9
 }
 
diff --git a/test/CodeGen/ARM/vmla.ll b/test/CodeGen/ARM/vmla.ll
index 84052182741..77cf10ad3e6 100644
--- a/test/CodeGen/ARM/vmla.ll
+++ b/test/CodeGen/ARM/vmla.ll
@@ -39,8 +39,8 @@ define <2 x float> @vmlaf32(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) n
 	%tmp1 = load <2 x float>* %A
 	%tmp2 = load <2 x float>* %B
 	%tmp3 = load <2 x float>* %C
-	%tmp4 = mul <2 x float> %tmp2, %tmp3
-	%tmp5 = add <2 x float> %tmp1, %tmp4
+	%tmp4 = fmul <2 x float> %tmp2, %tmp3
+	%tmp5 = fadd <2 x float> %tmp1, %tmp4
 	ret <2 x float> %tmp5
 }
 
@@ -83,8 +83,8 @@ define <4 x float> @vmlaQf32(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C)
 	%tmp1 = load <4 x float>* %A
 	%tmp2 = load <4 x float>* %B
 	%tmp3 = load <4 x float>* %C
-	%tmp4 = mul <4 x float> %tmp2, %tmp3
-	%tmp5 = add <4 x float> %tmp1, %tmp4
+	%tmp4 = fmul <4 x float> %tmp2, %tmp3
+	%tmp5 = fadd <4 x float> %tmp1, %tmp4
 	ret <4 x float> %tmp5
 }
 
diff --git a/test/CodeGen/ARM/vmls.ll b/test/CodeGen/ARM/vmls.ll
index c89552e6f9e..2b70a7878ce 100644
--- a/test/CodeGen/ARM/vmls.ll
+++ b/test/CodeGen/ARM/vmls.ll
@@ -39,8 +39,8 @@ define <2 x float> @vmlsf32(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) n
 	%tmp1 = load <2 x float>* %A
 	%tmp2 = load <2 x float>* %B
 	%tmp3 = load <2 x float>* %C
-	%tmp4 = mul <2 x float> %tmp2, %tmp3
-	%tmp5 = sub <2 x float> %tmp1, %tmp4
+	%tmp4 = fmul <2 x float> %tmp2, %tmp3
+	%tmp5 = fsub <2 x float> %tmp1, %tmp4
 	ret <2 x float> %tmp5
 }
 
@@ -83,8 +83,8 @@ define <4 x float> @vmlsQf32(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C)
 	%tmp1 = load <4 x float>* %A
 	%tmp2 = load <4 x float>* %B
 	%tmp3 = load <4 x float>* %C
-	%tmp4 = mul <4 x float> %tmp2, %tmp3
-	%tmp5 = sub <4 x float> %tmp1, %tmp4
+	%tmp4 = fmul <4 x float> %tmp2, %tmp3
+	%tmp5 = fsub <4 x float> %tmp1, %tmp4
 	ret <4 x float> %tmp5
 }
 
diff --git a/test/CodeGen/ARM/vmul.ll b/test/CodeGen/ARM/vmul.ll
index 325da5deabe..1d916802127 100644
--- a/test/CodeGen/ARM/vmul.ll
+++ b/test/CodeGen/ARM/vmul.ll
@@ -32,7 +32,7 @@ define <2 x float> @vmulf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK: vmul.f32
 	%tmp1 = load <2 x float>* %A
 	%tmp2 = load <2 x float>* %B
-	%tmp3 = mul <2 x float> %tmp1, %tmp2
+	%tmp3 = fmul <2 x float> %tmp1, %tmp2
 	ret <2 x float> %tmp3
 }
 
@@ -77,7 +77,7 @@ define <4 x float> @vmulQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK: vmul.f32
 	%tmp1 = load <4 x float>* %A
 	%tmp2 = load <4 x float>* %B
-	%tmp3 = mul <4 x float> %tmp1, %tmp2
+	%tmp3 = fmul <4 x float> %tmp1, %tmp2
 	ret <4 x float> %tmp3
 }
 
diff --git a/test/CodeGen/ARM/vneg.ll b/test/CodeGen/ARM/vneg.ll
index 7764e87c6ac..4a10732458e 100644
--- a/test/CodeGen/ARM/vneg.ll
+++ b/test/CodeGen/ARM/vneg.ll
@@ -28,7 +28,7 @@ define <2 x float> @vnegf32(<2 x float>* %A) nounwind {
 ;CHECK: vnegf32:
 ;CHECK: vneg.f32
 	%tmp1 = load <2 x float>* %A
-	%tmp2 = sub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
+	%tmp2 = fsub <2 x float> < float -0.000000e+00, float -0.000000e+00 >, %tmp1
 	ret <2 x float> %tmp2
 }
 
@@ -60,7 +60,7 @@ define <4 x float> @vnegQf32(<4 x float>* %A) nounwind {
 ;CHECK: vnegQf32:
 ;CHECK: vneg.f32
 	%tmp1 = load <4 x float>* %A
-	%tmp2 = sub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp1
+	%tmp2 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %tmp1
 	ret <4 x float> %tmp2
 }
 
diff --git a/test/CodeGen/ARM/vsub.ll b/test/CodeGen/ARM/vsub.ll
index 8f0055fd410..3416de76f12 100644
--- a/test/CodeGen/ARM/vsub.ll
+++ b/test/CodeGen/ARM/vsub.ll
@@ -41,7 +41,7 @@ define <2 x float> @vsubf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 ;CHECK: vsub.f32
 	%tmp1 = load <2 x float>* %A
 	%tmp2 = load <2 x float>* %B
-	%tmp3 = sub <2 x float> %tmp1, %tmp2
+	%tmp3 = fsub <2 x float> %tmp1, %tmp2
 	ret <2 x float> %tmp3
 }
 
@@ -86,7 +86,7 @@ define <4 x float> @vsubQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
 ;CHECK: vsub.f32
 	%tmp1 = load <4 x float>* %A
 	%tmp2 = load <4 x float>* %B
-	%tmp3 = sub <4 x float> %tmp1, %tmp2
+	%tmp3 = fsub <4 x float> %tmp1, %tmp2
 	ret <4 x float> %tmp3
 }
 
diff --git a/test/CodeGen/ARM/vtrn.ll b/test/CodeGen/ARM/vtrn.ll
index 5122b0981e9..10bb10ac24a 100644
--- a/test/CodeGen/ARM/vtrn.ll
+++ b/test/CodeGen/ARM/vtrn.ll
@@ -44,7 +44,7 @@ define <2 x float> @vtrnf(<2 x float>* %A, <2 x float>* %B) nounwind {
 	%tmp2 = load <2 x float>* %B
 	%tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 0, i32 2>
 	%tmp4 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 1, i32 3>
-	%tmp5 = add <2 x float> %tmp3, %tmp4
+	%tmp5 = fadd <2 x float> %tmp3, %tmp4
 	ret <2 x float> %tmp5
 }
 
@@ -92,6 +92,6 @@ define <4 x float> @vtrnQf(<4 x float>* %A, <4 x float>* %B) nounwind {
 	%tmp2 = load <4 x float>* %B
 	%tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
 	%tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
-	%tmp5 = add <4 x float> %tmp3, %tmp4
+	%tmp5 = fadd <4 x float> %tmp3, %tmp4
 	ret <4 x float> %tmp5
 }
diff --git a/test/CodeGen/ARM/vuzp.ll b/test/CodeGen/ARM/vuzp.ll
index e531718d94a..6cef188d76d 100644
--- a/test/CodeGen/ARM/vuzp.ll
+++ b/test/CodeGen/ARM/vuzp.ll
@@ -70,6 +70,6 @@ define <4 x float> @vuzpQf(<4 x float>* %A, <4 x float>* %B) nounwind {
 	%tmp2 = load <4 x float>* %B
 	%tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
 	%tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-	%tmp5 = add <4 x float> %tmp3, %tmp4
+	%tmp5 = fadd <4 x float> %tmp3, %tmp4
 	ret <4 x float> %tmp5
 }
diff --git a/test/CodeGen/ARM/vzip.ll b/test/CodeGen/ARM/vzip.ll
index 32f7e0d02c4..a9ecdcab42d 100644
--- a/test/CodeGen/ARM/vzip.ll
+++ b/test/CodeGen/ARM/vzip.ll
@@ -70,6 +70,6 @@ define <4 x float> @vzipQf(<4 x float>* %A, <4 x float>* %B) nounwind {
 	%tmp2 = load <4 x float>* %B
 	%tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
 	%tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
-	%tmp5 = add <4 x float> %tmp3, %tmp4
+	%tmp5 = fadd <4 x float> %tmp3, %tmp4
 	ret <4 x float> %tmp5
 }
diff --git a/test/CodeGen/X86/2010-01-07-UAMemFeature.ll b/test/CodeGen/X86/2010-01-07-UAMemFeature.ll
index 3728f15d969..bb24adb4181 100644
--- a/test/CodeGen/X86/2010-01-07-UAMemFeature.ll
+++ b/test/CodeGen/X86/2010-01-07-UAMemFeature.ll
@@ -6,6 +6,6 @@ target triple = "x86_64-unknown-linux-gnu"
 
 define <4 x float> @foo(<4 x float>* %P, <4 x float> %In) nounwind {
 	%A = load <4 x float>* %P, align 4
-	%B = add <4 x float> %A, %In
+	%B = fadd <4 x float> %A, %In
 	ret <4 x float> %B
 }
diff --git a/test/CodeGen/X86/stack-align.ll b/test/CodeGen/X86/stack-align.ll
index e971ef70dbd..271ad1aad0b 100644
--- a/test/CodeGen/X86/stack-align.ll
+++ b/test/CodeGen/X86/stack-align.ll
@@ -31,7 +31,7 @@ define <2 x double> @test3(<2 x double> %x, <2 x double> %y) alignstack(32) {
 entry:
 ; CHECK: andl{{.*}}$-32, %esp
 	call void @test2()
-	%A = mul <2 x double> %x, %y
+	%A = fmul <2 x double> %x, %y
 	ret <2 x double> %A
 }
 
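
A note on the IR distinction behind this patch: since the opcode split in the
LLVM 2.6 era, add, sub, and mul are defined only for integer and
integer-vector operands, while floating-point operands must use the dedicated
fadd, fsub, and fmul opcodes, which carry IEEE-754 rather than two's-complement
semantics. A minimal sketch of the two forms (the function names here are
illustrative only, not taken from the patch):

    ; Floating-point vectors require the f-prefixed opcodes.
    define <2 x float> @fp_sum(<2 x float> %a, <2 x float> %b) {
      %r = fadd <2 x float> %a, %b
      ret <2 x float> %r
    }

    ; Integer vectors keep the plain opcodes.
    define <2 x i32> @int_sum(<2 x i32> %a, <2 x i32> %b) {
      %r = add <2 x i32> %a, %b
      ret <2 x i32> %r
    }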