Remove fadd(fmul) patterns for FMA3. This needs to be implemented by paying attention to FP_CONTRACT and matching @llvm.fma, which is not available yet. This will at least allow us to enable intrinsic use in the meantime.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@157804 91177308-0d34-0410-b5e6-96231b3b80d8
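To illustrate the distinction the message draws, here is a minimal sketch in LLVM IR (the function names @mul_add and @fused are illustrative, not from this commit): contracting a separate fmul/fadd pair into one FMA changes rounding, so it is only legal when FP_CONTRACT allows it, whereas the @llvm.fma intrinsic requests the fused, single-rounding operation explicitly.

; Separate multiply and add: the removed patterns contracted this
; unconditionally, which is only valid under FP_CONTRACT.
define <4 x float> @mul_add(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
  %x = fmul <4 x float> %a0, %a1
  %res = fadd <4 x float> %x, %a2
  ret <4 x float> %res
}

; Explicitly fused form: matching this is always safe, since the
; intrinsic itself carries the single-rounding semantics.
declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)

define <4 x float> @fused(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
  %res = call <4 x float> @llvm.fma.v4f32(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
  ret <4 x float> %res
}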
Craig Topper 2012-06-01 06:07:48 +00:00
parent 78fc72d0f1
commit 3a8172ad8d
2 changed files with 0 additions and 278 deletions


@@ -113,162 +113,6 @@ let ExeDomain = SSEPackedDouble in {
memopv4f64, int_x86_fma4_vfnmsub_pd, int_x86_fma4_vfnmsub_pd_256>, VEX_W;
}
let Predicates = [HasFMA3], AddedComplexity = 20 in {
//------------
// FP double precision ADD - 256
//------------
// FMA231: src1 = src2*src3 + src1
def : Pat<(v4f64 (fadd (fmul VR256:$src2, (memopv4f64 addr:$src3)), VR256:$src1)),
          (VFMADDPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
// FMA231: src1 = src2*src3 + src1
def : Pat<(v4f64 (fadd (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
          (VFMADDPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
//------------
// FP double precision ADD - 128
//------------
// FMA231: src1 = src2*src3 + src1
def : Pat<(v2f64 (fadd (fmul VR128:$src2, (memopv2f64 addr:$src3)), VR128:$src1)),
          (VFMADDPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
// FMA231: src1 = src2*src3 + src1
def : Pat<(v2f64 (fadd (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
          (VFMADDPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
//------------
// FP double precision SUB - 256
//------------
// FMA231: src1 = src2*src3 - src1
def : Pat<(v4f64 (fsub (fmul VR256:$src2, (memopv4f64 addr:$src3)), VR256:$src1)),
          (VFMSUBPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
// FMA231: src1 = src2*src3 - src1
def : Pat<(v4f64 (fsub (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
          (VFMSUBPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
//------------
// FP double precision SUB - 128
//------------
// FMA231: src1 = src2*src3 - src1
def : Pat<(v2f64 (fsub (fmul VR128:$src2, (memopv2f64 addr:$src3)), VR128:$src1)),
          (VFMSUBPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
// FMA231: src1 = src2*src3 - src1
def : Pat<(v2f64 (fsub (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
          (VFMSUBPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
//------------
// FP double precision FNMADD - 256
//------------
// FMA231: src1 = - src2*src3 + src1
def : Pat<(v4f64 (fsub VR256:$src1, (fmul VR256:$src2, (memopv4f64 addr:$src3)))),
          (VFNMADDPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
// FMA231: src1 = - src2*src3 + src1
def : Pat<(v4f64 (fsub VR256:$src1, (fmul VR256:$src2, VR256:$src3))),
          (VFNMADDPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
//------------
// FP double precision FNMADD - 128
//------------
// FMA231: src1 = - src2*src3 + src1
def : Pat<(v2f64 (fsub VR128:$src1, (fmul VR128:$src2, (memopv2f64 addr:$src3)))),
          (VFNMADDPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
// FMA231: src1 = - src2*src3 + src1
def : Pat<(v2f64 (fsub VR128:$src1, (fmul VR128:$src2, VR128:$src3))),
          (VFNMADDPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
//------------
// FP single precision ADD - 256
//------------
// FMA231: src1 = src2*src3 + src1
def : Pat<(v8f32 (fadd (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
          (VFMADDPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
// FMA213: src1 = src2*src1 + src3
def : Pat<(v8f32 (fadd (fmul VR256:$src1, VR256:$src2), (memopv8f32 addr:$src3))),
          (VFMADDPSr213mY VR256:$src1, VR256:$src2, addr:$src3)>;
// FMA231: src1 = src2*src3 + src1
def : Pat<(v8f32 (fadd (fmul (memopv8f32 addr:$src3), VR256:$src2), VR256:$src1)),
          (VFMADDPSr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
// FMA213: src1 = src2*src1 + src3
def : Pat<(v8f32 (fadd (fmul VR256:$src2, VR256:$src1), VR256:$src3)),
          (VFMADDPSr213rY VR256:$src1, VR256:$src2, VR256:$src3)>;
//------------
// FP single precision ADD - 128
//------------
// FMA231: src1 = src2*src3 + src1
def : Pat<(v4f32 (fadd (fmul VR128:$src2, (memopv4f32 addr:$src3)), VR128:$src1)),
          (VFMADDPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
// FMA231: src1 = src2*src3 + src1
def : Pat<(v4f32 (fadd (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
          (VFMADDPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
//------------
// FP single precision SUB - 256
//------------
// FMA231: src1 = src2*src3 - src1
def : Pat<(v8f32 (fsub (fmul VR256:$src2, (memopv8f32 addr:$src3)), VR256:$src1)),
          (VFMSUBPSr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
// FMA231: src1 = src2*src3 - src1
def : Pat<(v8f32 (fsub (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
          (VFMSUBPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
//------------
// FP single precision SUB - 128
//------------
// FMA231: src1 = src2*src3 - src1
def : Pat<(v4f32 (fsub (fmul VR128:$src2, (memopv4f32 addr:$src3)), VR128:$src1)),
          (VFMSUBPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
// FMA231: src1 = src2*src3 - src1
def : Pat<(v4f32 (fsub (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
          (VFMSUBPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
//------------
// FP single precision FNMADD - 256
//------------
// FMA231: src1 = - src2*src3 + src1
def : Pat<(v8f32 (fsub VR256:$src1, (fmul VR256:$src2, (memopv8f32 addr:$src3)))),
          (VFNMADDPSr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
// FMA231: src1 = - src2*src3 + src1
def : Pat<(v8f32 (fsub VR256:$src1, (fmul VR256:$src2, VR256:$src3))),
          (VFNMADDPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
//------------
// FP single precision FNMADD - 128
//------------
// FMA231: src1 = - src2*src3 + src1
def : Pat<(v4f32 (fsub VR128:$src1, (fmul VR128:$src2, (memopv4f32 addr:$src3)))),
          (VFNMADDPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
// FMA231: src1 = - src2*src3 + src1
def : Pat<(v4f32 (fsub VR128:$src1, (fmul VR128:$src2, VR128:$src3))),
          (VFNMADDPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
} // HasFMA3
//------------------------------
// SCALAR
//------------------------------
let Constraints = "$src1 = $dst" in {
multiclass fma3s_rm<bits<8> opc, string OpcodeStr, X86MemOperand x86memop,
@@ -328,62 +172,6 @@ defm VFNMSUBSD : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub", "sd", f64mem, FR64,
int_x86_fma4_vfnmsub_sd>, VEX_W, VEX_LIG;
let Predicates = [HasFMA3], AddedComplexity = 20 in {
//------------
// FP scalar ADD
//------------
// FMADD231: src1 = src2*src3 + src1
def : Pat<(f32 (fadd (fmul FR32:$src2, FR32:$src3), FR32:$src1)),
          (VFMADDSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
def : Pat<(f32 (fadd (fmul FR32:$src2, (loadf32 addr:$src3)), FR32:$src1)),
          (VFMADDSSr231m FR32:$src1, FR32:$src2, addr:$src3)>;
def : Pat<(f64 (fadd (fmul FR64:$src2, FR64:$src3), FR64:$src1)),
          (VFMADDSDr231r FR64:$src1, FR64:$src2, FR64:$src3)>;
def : Pat<(f64 (fadd (fmul FR64:$src2, (loadf64 addr:$src3)), FR64:$src1)),
          (VFMADDSDr231m FR64:$src1, FR64:$src2, addr:$src3)>;
//------------
// FP scalar SUB: src1 = src2*src3 - src1
//------------
def : Pat<(f32 (fsub (fmul FR32:$src2, FR32:$src3), FR32:$src1)),
          (VFMSUBSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
def : Pat<(f32 (fsub (fmul FR32:$src2, (loadf32 addr:$src3)), FR32:$src1)),
          (VFMSUBSSr231m FR32:$src1, FR32:$src2, addr:$src3)>;
def : Pat<(f64 (fsub (fmul FR64:$src2, FR64:$src3), FR64:$src1)),
          (VFMSUBSDr231r FR64:$src1, FR64:$src2, FR64:$src3)>;
def : Pat<(f64 (fsub (fmul FR64:$src2, (loadf64 addr:$src3)), FR64:$src1)),
          (VFMSUBSDr231m FR64:$src1, FR64:$src2, addr:$src3)>;
//------------
// FP scalar NADD: src1 = src1 - src2*src3
//------------
def : Pat<(f32 (fsub FR32:$src1, (fmul FR32:$src2, FR32:$src3))),
          (VFNMADDSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
def : Pat<(f32 (fsub FR32:$src1, (fmul FR32:$src2, (loadf32 addr:$src3)))),
          (VFNMADDSSr231m FR32:$src1, FR32:$src2, addr:$src3)>;
def : Pat<(f64 (fsub FR64:$src1, (fmul FR64:$src2, FR64:$src3))),
          (VFNMADDSDr231r FR64:$src1, FR64:$src2, FR64:$src3)>;
def : Pat<(f64 (fsub FR64:$src1, (fmul FR64:$src2, (loadf64 addr:$src3)))),
          (VFNMADDSDr231m FR64:$src1, FR64:$src2, addr:$src3)>;
} // HasFMA3
//===----------------------------------------------------------------------===//
// FMA4 - AMD 4 operand Fused Multiply-Add instructions
//===----------------------------------------------------------------------===//


@@ -1,66 +0,0 @@
; RUN: llc < %s -mtriple=x86_64-pc-win32 -mcpu=core-avx2 -mattr=avx2,+fma3 | FileCheck %s
define <4 x float> @test_x86_fmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; CHECK: fmadd231ps {{.*\(%r.*}}, %xmm
  %x = fmul <4 x float> %a0, %a1
  %res = fadd <4 x float> %x, %a2
  ret <4 x float> %res
}

define <4 x float> @test_x86_fmsub_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; CHECK: fmsub231ps {{.*\(%r.*}}, %xmm
  %x = fmul <4 x float> %a0, %a1
  %res = fsub <4 x float> %x, %a2
  ret <4 x float> %res
}

define <4 x float> @test_x86_fnmadd_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; CHECK: fnmadd231ps {{.*\(%r.*}}, %xmm
  %x = fmul <4 x float> %a0, %a1
  %res = fsub <4 x float> %a2, %x
  ret <4 x float> %res
}

define <8 x float> @test_x86_fmadd_ps_y(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
; CHECK: vfmadd213ps {{.*\(%r.*}}, %ymm
  %x = fmul <8 x float> %a0, %a1
  %res = fadd <8 x float> %x, %a2
  ret <8 x float> %res
}

define <4 x double> @test_x86_fmadd_pd_y(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
; CHECK: vfmadd231pd {{.*\(%r.*}}, %ymm
  %x = fmul <4 x double> %a0, %a1
  %res = fadd <4 x double> %x, %a2
  ret <4 x double> %res
}

define <8 x float> @test_x86_fmsub_ps_y(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
; CHECK: fmsub231ps {{.*\(%r.*}}, %ymm
  %x = fmul <8 x float> %a0, %a1
  %res = fsub <8 x float> %x, %a2
  ret <8 x float> %res
}

define <8 x float> @test_x86_fnmadd_ps_y(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
; CHECK: fnmadd231ps {{.*\(%r.*}}, %ymm
  %x = fmul <8 x float> %a0, %a1
  %res = fsub <8 x float> %a2, %x
  ret <8 x float> %res
}

define float @test_x86_fnmadd_ss(float %a0, float %a1, float %a2) {
; CHECK: vfnmadd231ss %xmm1, %xmm0, %xmm2
  %x = fmul float %a0, %a1
  %res = fsub float %a2, %x
  ret float %res
}

define double @test_x86_fnmadd_sd(double %a0, double %a1, double %a2) {
; CHECK: vfnmadd231sd %xmm1, %xmm0, %xmm2
  %x = fmul double %a0, %a1
  %res = fsub double %a2, %x
  ret double %res
}
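Intrinsic-based FMA selection remains available after this change. A minimal sketch of that usage, assuming the vfmadd packed-single intrinsic follows the int_x86_fma4_* naming visible in the diff above (the exact intrinsic name is an assumption, not taken from this commit):

; Calling the target intrinsic directly still selects an FMA instruction;
; only the implicit fadd(fmul(...)) contraction patterns were removed.
; NOTE: the intrinsic name below is assumed from the int_x86_fma4_* naming.
declare <4 x float> @llvm.x86.fma4.vfmadd.ps(<4 x float>, <4 x float>, <4 x float>)

define <4 x float> @fmadd_via_intrinsic(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
  %res = call <4 x float> @llvm.x86.fma4.vfmadd.ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2)
  ret <4 x float> %res
}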