From 57d4b3315fb7a84379778c4727594cd7480dbfe1 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper@gmail.com>
Date: Fri, 30 Dec 2011 01:49:53 +0000
Subject: [PATCH] Fix load size for FMA4 SS/SD instructions. They need to use
 f32 and f64 size, but with the special handling to be compatible with the
 intrinsic expecting a vector. Similar handling is already used elsewhere.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@147360 91177308-0d34-0410-b5e6-96231b3b80d8
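To make the fix concrete, here is a minimal IR sketch of the folding this
enables (the function and value names are illustrative; the shape mirrors the
tests added below). The scalar value reaches the intrinsic through an
insertelement, and matching the memory operand as an f32/f64-sized scalar load
lets the selector fold it into the rm form instead of demanding an aligned
16-byte vector load:

  define < 4 x float > @fold_ss_load(< 4 x float > %a0, < 4 x float > %a1, float* %p) {
    ; 4-byte scalar load; this is what sse_load_f32 now matches
    %x = load float *%p
    ; widen to the <4 x float> the intrinsic signature expects
    %y = insertelement <4 x float> undef, float %x, i32 0
    %res = call < 4 x float > @llvm.x86.fma4.vfmadd.ss(< 4 x float > %a0, < 4 x float > %a1, < 4 x float > %y)
    ; expected selection is roughly: vfmaddss (%rdi), %xmm1, %xmm0, %xmm0
    ret < 4 x float > %res
  }
  declare < 4 x float > @llvm.x86.fma4.vfmadd.ss(< 4 x float >, < 4 x float >, < 4 x float >) nounwind readnone

sse_load_f32 and sse_load_f64 are the same complex patterns the existing SSE
scalar-intrinsic patterns use for this idiom (the "similar handling" noted
above), which is why no alignment restriction is needed on the memory operand.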
---
 lib/Target/X86/X86InstrFMA.td              | 103 +++++++++------------
 test/CodeGen/X86/fma4-intrinsics-x86_64.ll |  28 ++++++
 2 files changed, 71 insertions(+), 60 deletions(-)

diff --git a/lib/Target/X86/X86InstrFMA.td b/lib/Target/X86/X86InstrFMA.td
index 83429eb4dd8..ce4e1a7bb59 100644
--- a/lib/Target/X86/X86InstrFMA.td
+++ b/lib/Target/X86/X86InstrFMA.td
@@ -98,23 +98,22 @@ defm VFNMSUB : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub">;
 
 //===----------------------------------------------------------------------===//
 
-multiclass fma4s<bits<8> opc, string OpcodeStr> {
+multiclass fma4s<bits<8> opc, string OpcodeStr, Operand memop> {
   def rr : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
            (ins VR128:$src1, VR128:$src2, VR128:$src3),
            !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
            []>, XOP_W;
   def rm : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
-           (ins VR128:$src1, VR128:$src2, f128mem:$src3),
+           (ins VR128:$src1, VR128:$src2, memop:$src3),
            !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
            []>, XOP_W;
   def mr : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
-           (ins VR128:$src1, f128mem:$src2, VR128:$src3),
+           (ins VR128:$src1, memop:$src2, VR128:$src3),
            !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
            []>;
-
 }
 
 multiclass fma4p<bits<8> opc, string OpcodeStr> {
@@ -151,20 +150,20 @@ multiclass fma4p<bits<8> opc, string OpcodeStr> {
 }
 
 let isAsmParserOnly = 1 in {
-  defm VFMADDSS4 : fma4s<0x6A, "vfmaddss">;
-  defm VFMADDSD4 : fma4s<0x6B, "vfmaddsd">;
+  defm VFMADDSS4 : fma4s<0x6A, "vfmaddss", ssmem>;
+  defm VFMADDSD4 : fma4s<0x6B, "vfmaddsd", sdmem>;
   defm VFMADDPS4 : fma4p<0x68, "vfmaddps">;
   defm VFMADDPD4 : fma4p<0x69, "vfmaddpd">;
-  defm VFMSUBSS4 : fma4s<0x6E, "vfmsubss">;
-  defm VFMSUBSD4 : fma4s<0x6F, "vfmsubsd">;
+  defm VFMSUBSS4 : fma4s<0x6E, "vfmsubss", ssmem>;
+  defm VFMSUBSD4 : fma4s<0x6F, "vfmsubsd", sdmem>;
   defm VFMSUBPS4 : fma4p<0x6C, "vfmsubps">;
   defm VFMSUBPD4 : fma4p<0x6D, "vfmsubpd">;
-  defm VFNMADDSS4 : fma4s<0x7A, "vfnmaddss">;
-  defm VFNMADDSD4 : fma4s<0x7B, "vfnmaddsd">;
+  defm VFNMADDSS4 : fma4s<0x7A, "vfnmaddss", ssmem>;
+  defm VFNMADDSD4 : fma4s<0x7B, "vfnmaddsd", sdmem>;
   defm VFNMADDPS4 : fma4p<0x78, "vfnmaddps">;
   defm VFNMADDPD4 : fma4p<0x79, "vfnmaddpd">;
-  defm VFNMSUBSS4 : fma4s<0x7E, "vfnmsubss">;
-  defm VFNMSUBSD4 : fma4s<0x7F, "vfnmsubsd">;
+  defm VFNMSUBSS4 : fma4s<0x7E, "vfnmsubss", ssmem>;
+  defm VFNMSUBSD4 : fma4s<0x7F, "vfnmsubsd", sdmem>;
   defm VFNMSUBPS4 : fma4p<0x7C, "vfnmsubps">;
   defm VFNMSUBPD4 : fma4p<0x7D, "vfnmsubpd">;
   defm VFMADDSUBPS4 : fma4p<0x5C, "vfmaddsubps">;
@@ -178,21 +177,17 @@ let isAsmParserOnly = 1 in {
 // VFMADD
 def : Pat<(int_x86_fma4_vfmadd_ss VR128:$src1, VR128:$src2, VR128:$src3),
           (VFMADDSS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
-def : Pat<(int_x86_fma4_vfmadd_ss VR128:$src1, VR128:$src2,
-                                  (alignedloadv4f32 addr:$src3)),
-          (VFMADDSS4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmadd_ss VR128:$src1, (alignedloadv4f32 addr:$src2),
-                                  VR128:$src3),
-          (VFMADDSS4mr VR128:$src1, addr:$src2, VR128:$src3)>;
+def : Pat<(int_x86_fma4_vfmadd_ss VR128:$src1, VR128:$src2, sse_load_f32:$src3),
+          (VFMADDSS4rm VR128:$src1, VR128:$src2, sse_load_f32:$src3)>;
+def : Pat<(int_x86_fma4_vfmadd_ss VR128:$src1, sse_load_f32:$src2, VR128:$src3),
+          (VFMADDSS4mr VR128:$src1, sse_load_f32:$src2, VR128:$src3)>;
 
 def : Pat<(int_x86_fma4_vfmadd_sd VR128:$src1, VR128:$src2, VR128:$src3),
           (VFMADDSD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
-def : Pat<(int_x86_fma4_vfmadd_sd VR128:$src1, VR128:$src2,
-                                  (alignedloadv2f64 addr:$src3)),
-          (VFMADDSD4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmadd_sd VR128:$src1, (alignedloadv2f64 addr:$src2),
-                                  VR128:$src3),
-          (VFMADDSD4mr VR128:$src1, addr:$src2, VR128:$src3)>;
+def : Pat<(int_x86_fma4_vfmadd_sd VR128:$src1, VR128:$src2, sse_load_f64:$src3),
+          (VFMADDSD4rm VR128:$src1, VR128:$src2, sse_load_f64:$src3)>;
+def : Pat<(int_x86_fma4_vfmadd_sd VR128:$src1, sse_load_f64:$src2, VR128:$src3),
+          (VFMADDSD4mr VR128:$src1, sse_load_f64:$src2, VR128:$src3)>;
 
 def : Pat<(int_x86_fma4_vfmadd_ps VR128:$src1, VR128:$src2, VR128:$src3),
           (VFMADDPS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
@@ -235,21 +230,17 @@ def : Pat<(int_x86_fma4_vfmadd_pd_256 VR256:$src1,
 // VFMSUB
 def : Pat<(int_x86_fma4_vfmsub_ss VR128:$src1, VR128:$src2, VR128:$src3),
           (VFMSUBSS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
-def : Pat<(int_x86_fma4_vfmsub_ss VR128:$src1, VR128:$src2,
-                                  (alignedloadv4f32 addr:$src3)),
-          (VFMSUBSS4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmsub_ss VR128:$src1, (alignedloadv4f32 addr:$src2),
-                                  VR128:$src3),
-          (VFMSUBSS4mr VR128:$src1, addr:$src2, VR128:$src3)>;
+def : Pat<(int_x86_fma4_vfmsub_ss VR128:$src1, VR128:$src2, sse_load_f32:$src3),
+          (VFMSUBSS4rm VR128:$src1, VR128:$src2, sse_load_f32:$src3)>;
+def : Pat<(int_x86_fma4_vfmsub_ss VR128:$src1, sse_load_f32:$src2, VR128:$src3),
+          (VFMSUBSS4mr VR128:$src1, sse_load_f32:$src2, VR128:$src3)>;
 
 def : Pat<(int_x86_fma4_vfmsub_sd VR128:$src1, VR128:$src2, VR128:$src3),
           (VFMSUBSD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
-def : Pat<(int_x86_fma4_vfmsub_sd VR128:$src1, VR128:$src2,
-                                  (alignedloadv2f64 addr:$src3)),
-          (VFMSUBSD4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfmsub_sd VR128:$src1, (alignedloadv2f64 addr:$src2),
-                                  VR128:$src3),
-          (VFMSUBSD4mr VR128:$src1, addr:$src2, VR128:$src3)>;
+def : Pat<(int_x86_fma4_vfmsub_sd VR128:$src1, VR128:$src2, sse_load_f64:$src3),
+          (VFMSUBSD4rm VR128:$src1, VR128:$src2, sse_load_f64:$src3)>;
+def : Pat<(int_x86_fma4_vfmsub_sd VR128:$src1, sse_load_f64:$src2, VR128:$src3),
+          (VFMSUBSD4mr VR128:$src1, sse_load_f64:$src2, VR128:$src3)>;
 
 def : Pat<(int_x86_fma4_vfmsub_ps VR128:$src1, VR128:$src2, VR128:$src3),
           (VFMSUBPS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
@@ -292,21 +283,17 @@ def : Pat<(int_x86_fma4_vfmsub_pd_256 VR256:$src1,
 // VFNMADD
 def : Pat<(int_x86_fma4_vfnmadd_ss VR128:$src1, VR128:$src2, VR128:$src3),
           (VFNMADDSS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
-def : Pat<(int_x86_fma4_vfnmadd_ss VR128:$src1, VR128:$src2,
-                                  (alignedloadv4f32 addr:$src3)),
-          (VFNMADDSS4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfnmadd_ss VR128:$src1, (alignedloadv4f32 addr:$src2),
-                                  VR128:$src3),
-          (VFNMADDSS4mr VR128:$src1, addr:$src2, VR128:$src3)>;
+def : Pat<(int_x86_fma4_vfnmadd_ss VR128:$src1, VR128:$src2, sse_load_f32:$src3),
+          (VFNMADDSS4rm VR128:$src1, VR128:$src2, sse_load_f32:$src3)>;
+def : Pat<(int_x86_fma4_vfnmadd_ss VR128:$src1, sse_load_f32:$src2, VR128:$src3),
+          (VFNMADDSS4mr VR128:$src1, sse_load_f32:$src2, VR128:$src3)>;
 
 def : Pat<(int_x86_fma4_vfnmadd_sd VR128:$src1, VR128:$src2, VR128:$src3),
           (VFNMADDSD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
-def : Pat<(int_x86_fma4_vfnmadd_sd VR128:$src1, VR128:$src2,
-                                  (alignedloadv2f64 addr:$src3)),
-          (VFNMADDSD4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfnmadd_sd VR128:$src1, (alignedloadv2f64 addr:$src2),
-                                  VR128:$src3),
-          (VFNMADDSD4mr VR128:$src1, addr:$src2, VR128:$src3)>;
+def : Pat<(int_x86_fma4_vfnmadd_sd VR128:$src1, VR128:$src2, sse_load_f64:$src3),
+          (VFNMADDSD4rm VR128:$src1, VR128:$src2, sse_load_f64:$src3)>;
+def : Pat<(int_x86_fma4_vfnmadd_sd VR128:$src1, sse_load_f64:$src2, VR128:$src3),
+          (VFNMADDSD4mr VR128:$src1, sse_load_f64:$src2, VR128:$src3)>;
 
 def : Pat<(int_x86_fma4_vfnmadd_ps VR128:$src1, VR128:$src2, VR128:$src3),
           (VFNMADDPS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
@@ -349,21 +336,17 @@ def : Pat<(int_x86_fma4_vfnmadd_pd_256 VR256:$src1,
 // VFNMSUB
 def : Pat<(int_x86_fma4_vfnmsub_ss VR128:$src1, VR128:$src2, VR128:$src3),
           (VFNMSUBSS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
-def : Pat<(int_x86_fma4_vfnmsub_ss VR128:$src1, VR128:$src2,
-                                  (alignedloadv4f32 addr:$src3)),
-          (VFNMSUBSS4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfnmsub_ss VR128:$src1, (alignedloadv4f32 addr:$src2),
-                                  VR128:$src3),
-          (VFNMSUBSS4mr VR128:$src1, addr:$src2, VR128:$src3)>;
+def : Pat<(int_x86_fma4_vfnmsub_ss VR128:$src1, VR128:$src2, sse_load_f32:$src3),
+          (VFNMSUBSS4rm VR128:$src1, VR128:$src2, sse_load_f32:$src3)>;
+def : Pat<(int_x86_fma4_vfnmsub_ss VR128:$src1, sse_load_f32:$src2, VR128:$src3),
+          (VFNMSUBSS4mr VR128:$src1, sse_load_f32:$src2, VR128:$src3)>;
 
 def : Pat<(int_x86_fma4_vfnmsub_sd VR128:$src1, VR128:$src2, VR128:$src3),
           (VFNMSUBSD4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
-def : Pat<(int_x86_fma4_vfnmsub_sd VR128:$src1, VR128:$src2,
-                                  (alignedloadv2f64 addr:$src3)),
-          (VFNMSUBSD4rm VR128:$src1, VR128:$src2, addr:$src3)>;
-def : Pat<(int_x86_fma4_vfnmsub_sd VR128:$src1, (alignedloadv2f64 addr:$src2),
-                                  VR128:$src3),
-          (VFNMSUBSD4mr VR128:$src1, addr:$src2, VR128:$src3)>;
+def : Pat<(int_x86_fma4_vfnmsub_sd VR128:$src1, VR128:$src2, sse_load_f64:$src3),
+          (VFNMSUBSD4rm VR128:$src1, VR128:$src2, sse_load_f64:$src3)>;
+def : Pat<(int_x86_fma4_vfnmsub_sd VR128:$src1, sse_load_f64:$src2, VR128:$src3),
+          (VFNMSUBSD4mr VR128:$src1, sse_load_f64:$src2, VR128:$src3)>;
 
 def : Pat<(int_x86_fma4_vfnmsub_ps VR128:$src1, VR128:$src2, VR128:$src3),
           (VFNMSUBPS4rr VR128:$src1, VR128:$src2, VR128:$src3)>;
diff --git a/test/CodeGen/X86/fma4-intrinsics-x86_64.ll b/test/CodeGen/X86/fma4-intrinsics-x86_64.ll
index bd94c134ce2..a4b9cc66660 100644
--- a/test/CodeGen/X86/fma4-intrinsics-x86_64.ll
+++ b/test/CodeGen/X86/fma4-intrinsics-x86_64.ll
@@ -6,6 +6,20 @@ define < 4 x float > @test_x86_fma4_vfmadd_ss(< 4 x float > %a0, < 4 x float > %a1, < 4 x float > %a2) {
   %res = call < 4 x float > @llvm.x86.fma4.vfmadd.ss(< 4 x float > %a0, < 4 x float > %a1, < 4 x float > %a2) ; [#uses=1]
   ret < 4 x float > %res
 }
+define < 4 x float > @test_x86_fma4_vfmadd_ss_load(< 4 x float > %a0, < 4 x float > %a1, float* %a2) {
+  ; CHECK: vfmaddss (%{{.*}})
+  %x = load float *%a2
+  %y = insertelement <4 x float> undef, float %x, i32 0
+  %res = call < 4 x float > @llvm.x86.fma4.vfmadd.ss(< 4 x float > %a0, < 4 x float > %a1, < 4 x float > %y) ; [#uses=1]
+  ret < 4 x float > %res
+}
+define < 4 x float > @test_x86_fma4_vfmadd_ss_load2(< 4 x float > %a0, float* %a1, < 4 x float > %a2) {
+  ; CHECK: vfmaddss %{{.*}}, (%{{.*}})
+  %x = load float *%a1
+  %y = insertelement <4 x float> undef, float %x, i32 0
+  %res = call < 4 x float > @llvm.x86.fma4.vfmadd.ss(< 4 x float > %a0, < 4 x float > %y, < 4 x float > %a2) ; [#uses=1]
+  ret < 4 x float > %res
+}
 declare < 4 x float > @llvm.x86.fma4.vfmadd.ss(< 4 x float >, < 4 x float >, < 4 x float >) nounwind readnone
 
 define < 2 x double > @test_x86_fma4_vfmadd_sd(< 2 x double > %a0, < 2 x double > %a1, < 2 x double > %a2) {
@@ -13,6 +27,20 @@ define < 2 x double > @test_x86_fma4_vfmadd_sd(< 2 x double > %a0, < 2 x double > %a1, < 2 x double > %a2) {
   %res = call < 2 x double > @llvm.x86.fma4.vfmadd.sd(< 2 x double > %a0, < 2 x double > %a1, < 2 x double > %a2) ; [#uses=1]
   ret < 2 x double > %res
 }
+define < 2 x double > @test_x86_fma4_vfmadd_sd_load(< 2 x double > %a0, < 2 x double > %a1, double* %a2) {
+  ; CHECK: vfmaddsd (%{{.*}})
+  %x = load double *%a2
+  %y = insertelement <2 x double> undef, double %x, i32 0
+  %res = call < 2 x double > @llvm.x86.fma4.vfmadd.sd(< 2 x double > %a0, < 2 x double > %a1, < 2 x double > %y) ; [#uses=1]
+  ret < 2 x double > %res
+}
+define < 2 x double > @test_x86_fma4_vfmadd_sd_load2(< 2 x double > %a0, double* %a1, < 2 x double > %a2) {
+  ; CHECK: vfmaddsd %{{.*}}, (%{{.*}})
+  %x = load double *%a1
+  %y = insertelement <2 x double> undef, double %x, i32 0
+  %res = call < 2 x double > @llvm.x86.fma4.vfmadd.sd(< 2 x double > %a0, < 2 x double > %y, < 2 x double > %a2) ; [#uses=1]
+  ret < 2 x double > %res
+}
 declare < 2 x double > @llvm.x86.fma4.vfmadd.sd(< 2 x double >, < 2 x double >, < 2 x double >) nounwind readnone
 
 define < 4 x float > @test_x86_fma4_vfmadd_ps(< 4 x float > %a0, < 4 x float > %a1, < 4 x float > %a2) {