From d9567223e852c48b4022345ed130000810521438 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Fri, 1 Jun 2012 05:24:29 +0000
Subject: [PATCH] Tidy up. Remove trailing spaces and fix the worst of the 80
 column violations.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@157799 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86InstrFMA.td | 162 +++++++++++++++++++---------------
 1 file changed, 89 insertions(+), 73 deletions(-)

diff --git a/lib/Target/X86/X86InstrFMA.td b/lib/Target/X86/X86InstrFMA.td
index 273666985ca..1b7f0949df7 100644
--- a/lib/Target/X86/X86InstrFMA.td
+++ b/lib/Target/X86/X86InstrFMA.td
@@ -38,76 +38,79 @@ multiclass fma3p_rm<bits<8> opc, string OpcodeStr> {
 }

 // Intrinsic for 132 pattern
-multiclass fma3p_rm_int<bits<8> opc, string OpcodeStr,
+multiclass fma3p_rm_int<bits<8> opc, string OpcodeStr,
 PatFrag MemFrag128, PatFrag MemFrag256,
 Intrinsic Int128, Intrinsic Int256> {
 def r_Int : FMA3;
- //let mayLoad = 1 in
 def m_Int : FMA3;
+ [(set VR128:$dst,
+ (Int128 VR128:$src1, (MemFrag128 addr:$src3), VR128:$src2))]>;
 def rY_Int : FMA3;
- //let mayLoad = 1 in
 def mY_Int : FMA3;
+ [(set VR256:$dst,
+ (Int256 VR256:$src1, (MemFrag256 addr:$src3), VR256:$src2))]>;
 }
 }

 multiclass fma3p_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
- string OpcodeStr, string PackTy,
+ string OpcodeStr, string PackTy,
 PatFrag MemFrag128, PatFrag MemFrag256,
 Intrinsic Int128, Intrinsic Int256> {
- defm r132 : fma3p_rm_int ;
- defm r132 : fma3p_rm ;
- defm r213 : fma3p_rm ;
- defm r231 : fma3p_rm ;
+ defm r132 : fma3p_rm_int ;
+ defm r132 : fma3p_rm ;
+ defm r213 : fma3p_rm ;
+ defm r231 : fma3p_rm ;
 }

 // Fused Multiply-Add
 let ExeDomain = SSEPackedSingle in {
- defm VFMADDPS : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps", memopv4f32, memopv8f32,
- int_x86_fma4_vfmadd_ps, int_x86_fma4_vfmadd_ps_256>;
- defm VFMSUBPS : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps", memopv4f32, memopv8f32,
- int_x86_fma4_vfmsub_ps, int_x86_fma4_vfmsub_ps_256>;
- defm VFMADDSUBPS : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps", memopv4f32, memopv8f32,
- int_x86_fma4_vfmaddsub_ps, int_x86_fma4_vfmaddsub_ps_256>;
- defm VFMSUBADDPS : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps", memopv4f32, memopv8f32,
- int_x86_fma4_vfmsubadd_ps, int_x86_fma4_vfmaddsub_ps_256>;
+ defm VFMADDPS : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps", memopv4f32,
+ memopv8f32, int_x86_fma4_vfmadd_ps, int_x86_fma4_vfmadd_ps_256>;
+ defm VFMSUBPS : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps", memopv4f32,
+ memopv8f32, int_x86_fma4_vfmsub_ps, int_x86_fma4_vfmsub_ps_256>;
+ defm VFMADDSUBPS : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps",
+ memopv4f32, memopv8f32, int_x86_fma4_vfmaddsub_ps,
+ int_x86_fma4_vfmaddsub_ps_256>;
+ defm VFMSUBADDPS : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps",
+ memopv4f32, memopv8f32, int_x86_fma4_vfmsubadd_ps,
+ int_x86_fma4_vfmaddsub_ps_256>;
 }

 let ExeDomain = SSEPackedDouble in {
- defm VFMADDPD : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd", memopv2f64, memopv4f64,
- int_x86_fma4_vfmadd_pd, int_x86_fma4_vfmadd_pd_256>, VEX_W;
- defm VFMSUBPD : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd", memopv2f64, memopv4f64,
- int_x86_fma4_vfmsub_pd, int_x86_fma4_vfmsub_pd_256>, VEX_W;
- defm VFMADDSUBPD : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd", memopv2f64, memopv4f64,
- int_x86_fma4_vfmaddsub_pd, int_x86_fma4_vfmaddsub_pd_256>, VEX_W;
- defm VFMSUBADDPD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd", memopv2f64, memopv4f64,
- int_x86_fma4_vfmsubadd_pd, int_x86_fma4_vfmsubadd_pd_256>, VEX_W;
+ defm VFMADDPD : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd", memopv2f64,
+ memopv4f64, int_x86_fma4_vfmadd_pd, int_x86_fma4_vfmadd_pd_256>, VEX_W;
+ defm VFMSUBPD : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd", memopv2f64,
+ memopv4f64, int_x86_fma4_vfmsub_pd, int_x86_fma4_vfmsub_pd_256>, VEX_W;
+ defm VFMADDSUBPD : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd", memopv2f64,
+ memopv4f64, int_x86_fma4_vfmaddsub_pd, int_x86_fma4_vfmaddsub_pd_256>, VEX_W;
+ defm VFMSUBADDPD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd", memopv2f64,
+ memopv4f64, int_x86_fma4_vfmsubadd_pd, int_x86_fma4_vfmsubadd_pd_256>, VEX_W;
 }

 // Fused Negative Multiply-Add
 let ExeDomain = SSEPackedSingle in {
- defm VFNMADDPS : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps", memopv4f32, memopv8f32,
- int_x86_fma4_vfnmadd_ps, int_x86_fma4_vfnmadd_ps_256>;
- defm VFNMSUBPS : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps", memopv4f32, memopv8f32,
- int_x86_fma4_vfnmsub_ps, int_x86_fma4_vfnmsub_ps_256>;
+ defm VFNMADDPS : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps", memopv4f32,
+ memopv8f32, int_x86_fma4_vfnmadd_ps, int_x86_fma4_vfnmadd_ps_256>;
+ defm VFNMSUBPS : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps", memopv4f32,
+ memopv8f32, int_x86_fma4_vfnmsub_ps, int_x86_fma4_vfnmsub_ps_256>;
 }

 let ExeDomain = SSEPackedDouble in {
- defm VFNMADDPD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd", memopv2f64, memopv4f64,
- int_x86_fma4_vfnmadd_pd, int_x86_fma4_vfnmadd_pd_256>, VEX_W;
- defm VFNMSUBPD : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd", memopv2f64, memopv4f64,
- int_x86_fma4_vfnmsub_pd, int_x86_fma4_vfnmsub_pd_256>, VEX_W;
+ defm VFNMADDPD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd", memopv2f64,
+ memopv4f64, int_x86_fma4_vfnmadd_pd, int_x86_fma4_vfnmadd_pd_256>, VEX_W;
+ defm VFNMSUBPD : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd", memopv2f64,
+ memopv4f64, int_x86_fma4_vfnmsub_pd, int_x86_fma4_vfnmsub_pd_256>, VEX_W;
 }

 let Predicates = [HasFMA3], AddedComplexity = 20 in {
@@ -118,11 +121,11 @@ let Predicates = [HasFMA3], AddedComplexity = 20 in {
 // FMA231: src1 = src2*src3 + src1
 def : Pat<(v4f64 (fadd (fmul VR256:$src2, (memopv4f64 addr:$src3)), VR256:$src1)),
 (VFMADDPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
-
-// FMA231: src1 = src2*src3 + src1
+
+// FMA231: src1 = src2*src3 + src1
 def : Pat<(v4f64 (fadd (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
 (VFMADDPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
-
+
 //------------
 // FP double precision ADD - 128
@@ -132,7 +135,7 @@ def : Pat<(v4f64 (fadd (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
 // FMA231: src1 = src2*src3 + src1
 def : Pat<(v2f64 (fadd (fmul VR128:$src2, (memopv2f64 addr:$src3)), VR128:$src1)),
 (VFMADDPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
-
+
 // FMA231: src1 = src2*src3 + src1
 def : Pat<(v2f64 (fadd (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
 (VFMADDPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
@@ -143,11 +146,11 @@ def : Pat<(v2f64 (fadd (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
 // FMA231: src1 = src2*src3 - src1
 def : Pat<(v4f64 (fsub (fmul VR256:$src2, (memopv4f64 addr:$src3)), VR256:$src1)),
 (VFMSUBPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
-
+
 // FMA231: src1 = src2*src3 - src1
 def : Pat<(v4f64 (fsub (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
 (VFMSUBPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
-
+
 //------------
 // FP double precision SUB - 128
@@ -156,22 +159,22 @@ def : Pat<(v4f64 (fsub (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
 // FMA231: src1 = src2*src3 - src1
 def : Pat<(v2f64 (fsub (fmul VR128:$src2, (memopv2f64 addr:$src3)), VR128:$src1)),
 (VFMSUBPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
-
+
 // FMA231: src1 = src2*src3 - src1
 def : Pat<(v2f64 (fsub (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
 (VFMSUBPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
-
+
 //------------
 // FP double precision FNMADD - 256
 //------------
 // FMA231: src1 = - src2*src3 + src1
 def : Pat<(v4f64 (fsub VR256:$src1, (fmul VR256:$src2, (memopv4f64 addr:$src3)))),
 (VFNMADDPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
-
+
 // FMA231: src1 = - src2*src3 + src1
 def : Pat<(v4f64 (fsub VR256:$src1, (fmul VR256:$src2, VR256:$src3))),
 (VFNMADDPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
-
+
 //------------
 // FP double precision FNMADD - 128
 //------------
@@ -179,11 +182,11 @@ def : Pat<(v4f64 (fsub VR256:$src1, (fmul VR256:$src2, VR256:$src3))),
 // FMA231: src1 = - src2*src3 + src1
 def : Pat<(v2f64 (fsub VR128:$src1, (fmul VR128:$src2, (memopv2f64 addr:$src3)))),
 (VFNMADDPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
-
+
 // FMA231: src1 = - src2*src3 + src1
 def : Pat<(v2f64 (fsub VR128:$src1, (fmul VR128:$src2, VR128:$src3))),
 (VFNMADDPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
-
+
 //------------
 // FP single precision ADD - 256
 //------------
@@ -192,7 +195,7 @@ def : Pat<(v2f64 (fsub VR128:$src1, (fmul VR128:$src2, VR128:$src3))),
 def : Pat<(v8f32 (fadd (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
 (VFMADDPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
-// FMA213 : src1 = src2*src1 + src3
+// FMA213 : src1 = src2*src1 + src3
 def : Pat<(v8f32 (fadd (fmul VR256:$src1, VR256:$src2), (memopv8f32 addr:$src3))),
 (VFMADDPSr213mY VR256:$src1, VR256:$src2, addr:$src3)>;
@@ -208,14 +211,14 @@ def : Pat<(v8f32 (fadd (fmul VR256:$src2, VR256:$src1), VR256:$src3)),
 // FP single precision ADD - 128
 //------------
-// FMA231 : src1 = src2*src3 + src1
+// FMA231 : src1 = src2*src3 + src1
 def : Pat<(v4f32 (fadd (fmul VR128:$src2, (memopv4f32 addr:$src3)), VR128:$src1)),
 (VFMADDPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
-// FMA231 : src1 = src2*src3 + src1
+// FMA231 : src1 = src2*src3 + src1
 def : Pat<(v4f32 (fadd (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
 (VFMADDPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
-
+
 //------------
 // FP single precision SUB - 256
 //------------
@@ -230,11 +233,11 @@ def : Pat<(v8f32 (fsub (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
 //------------
 // FP single precision SUB - 128
 //------------
-// FMA231 : src1 = src2*src3 - src1
+// FMA231 : src1 = src2*src3 - src1
 def : Pat<(v4f32 (fsub (fmul VR128:$src2, (memopv4f32 addr:$src3)), VR128:$src1)),
 (VFMSUBPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
-// FMA231 : src1 = src2*src3 - src1
+// FMA231 : src1 = src2*src3 - src1
 def : Pat<(v4f32 (fsub (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
 (VFMSUBPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
@@ -253,11 +256,11 @@ def : Pat<(v8f32 (fsub VR256:$src1, (fmul VR256:$src2, VR256:$src3))),
 // FP single precision FNMADD - 128
 //------------
-// FMA231 : src1 = src2*src3 - src1
+// FMA231 : src1 = src2*src3 - src1
 def : Pat<(v4f32 (fsub VR128:$src1, (fmul VR128:$src2, (memopv4f32 addr:$src3)))),
 (VFNMADDPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
-// FMA231 : src1 = src2*src3 - src1
+// FMA231 : src1 = src2*src3 - src1
 def : Pat<(v4f32 (fsub VR128:$src1, (fmul VR128:$src2, VR128:$src3))),
 (VFNMADDPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
@@ -268,7 +271,8 @@ def : Pat<(v4f32 (fsub VR128:$src1, (fmul VR128:$src2, VR128:$src3))),
 //------------------------------
 let Constraints = "$src1 = $dst" in {
-multiclass fma3s_rm<bits<8> opc, string OpcodeStr, X86MemOperand x86memop, RegisterClass RC> {
+multiclass fma3s_rm<bits<8> opc, string OpcodeStr, X86MemOperand x86memop,
+ RegisterClass RC> {
 def r : FMA3 opc, string OpcodeStr, X86MemOperand x86memop, Regis
 []>;
 }

-multiclass fma3s_rm_int<bits<8> opc, string OpcodeStr,X86MemOperand x86memop, RegisterClass RC,
- Intrinsic IntId> {
+multiclass fma3s_rm_int<bits<8> opc, string OpcodeStr, X86MemOperand x86memop,
+ RegisterClass RC, Intrinsic IntId> {
 def r_Int : FMA3 opc, string OpcodeStr,X86MemOperand x86memop, Re
 }

 multiclass fma3s_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
- string OpcodeStr, string PackTy, X86MemOperand MemOp,
+ string OpcodeStr, string PackTy, X86MemOperand MemOp,
 RegisterClass RC, Intrinsic IntId> {
- defm r132 : fma3s_rm ;
- defm r213 : fma3s_rm ;
- defm r231 : fma3s_rm ;
- defm r132_Int: fma3s_rm_int ;
+ defm r132 : fma3s_rm ;
+ defm r213 : fma3s_rm ;
+ defm r231 : fma3s_rm ;
+ defm r132_Int : fma3s_rm_int ;
 }

-defm VFMADDSS : fma3s_forms<0x99, 0xA9, 0xB9, "vfmadd", "ss", f32mem, FR32, int_x86_fma4_vfmadd_ss>, VEX_LIG;
-defm VFMADDSD : fma3s_forms<0x99, 0xA9, 0xB9, "vfmadd", "sd", f64mem, FR64, int_x86_fma4_vfmadd_sd>, VEX_W, VEX_LIG;
-defm VFMSUBSS : fma3s_forms<0x9B, 0xAB, 0xBB, "vfmsub", "ss", f32mem, FR32, int_x86_fma4_vfmsub_ss>, VEX_LIG;
-defm VFMSUBSD : fma3s_forms<0x9B, 0xAB, 0xBB, "vfmsub", "sd", f64mem, FR64, int_x86_fma4_vfmsub_sd>, VEX_W, VEX_LIG;
+defm VFMADDSS : fma3s_forms<0x99, 0xA9, 0xB9, "vfmadd", "ss", f32mem, FR32,
+ int_x86_fma4_vfmadd_ss>, VEX_LIG;
+defm VFMADDSD : fma3s_forms<0x99, 0xA9, 0xB9, "vfmadd", "sd", f64mem, FR64,
+ int_x86_fma4_vfmadd_sd>, VEX_W, VEX_LIG;
+defm VFMSUBSS : fma3s_forms<0x9B, 0xAB, 0xBB, "vfmsub", "ss", f32mem, FR32,
+ int_x86_fma4_vfmsub_ss>, VEX_LIG;
+defm VFMSUBSD : fma3s_forms<0x9B, 0xAB, 0xBB, "vfmsub", "sd", f64mem, FR64,
+ int_x86_fma4_vfmsub_sd>, VEX_W, VEX_LIG;

-defm VFNMADDSS : fma3s_forms<0x9D, 0xAD, 0xBD, "vfnmadd", "ss", f32mem, FR32, int_x86_fma4_vfnmadd_ss>, VEX_LIG;
-defm VFNMADDSD : fma3s_forms<0x9D, 0xAD, 0xBD, "vfnmadd", "sd", f64mem, FR64, int_x86_fma4_vfnmadd_sd>, VEX_W, VEX_LIG;
-defm VFNMSUBSS : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub", "ss", f32mem, FR32, int_x86_fma4_vfnmsub_ss>, VEX_LIG;
-defm VFNMSUBSD : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub", "sd", f64mem, FR64, int_x86_fma4_vfnmsub_sd>, VEX_W, VEX_LIG;
+defm VFNMADDSS : fma3s_forms<0x9D, 0xAD, 0xBD, "vfnmadd", "ss", f32mem, FR32,
+ int_x86_fma4_vfnmadd_ss>, VEX_LIG;
+defm VFNMADDSD : fma3s_forms<0x9D, 0xAD, 0xBD, "vfnmadd", "sd", f64mem, FR64,
+ int_x86_fma4_vfnmadd_sd>, VEX_W, VEX_LIG;
+defm VFNMSUBSS : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub", "ss", f32mem, FR32,
+ int_x86_fma4_vfnmsub_ss>, VEX_LIG;
+defm VFNMSUBSD : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub", "sd", f64mem, FR64,
+ int_x86_fma4_vfnmsub_sd>, VEX_W, VEX_LIG;

 let Predicates = [HasFMA3], AddedComplexity = 20 in {
 //------------
-// FP scalar ADD
+// FP scalar ADD
 //------------

-// FMADD231 : src1 = src2*src3 + src1
+// FMADD231 : src1 = src2*src3 + src1
 def : Pat<(f32 (fadd (fmul FR32:$src2, FR32:$src3), FR32:$src1)),
 (VFMADDSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
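
For reference, the 132/213/231 suffixes in the pattern comments above encode which operands are multiplied and which one is accumulated: since $src1 is tied to $dst ("$src1 = $dst"), the 231 forms compute dst = src2*src3 +/- dst, the 213 forms compute dst = src2*dst + src3, and the FNMADD patterns negate the product. The short C sketch below restates that arithmetic with the FMA3 intrinsics from <immintrin.h>. It is illustrative only and not part of the patch; the helper names are invented here, and which encoding (132, 213 or 231) the compiler actually emits depends on register allocation.

/* Illustrative sketch (not part of the patch): the arithmetic selected by the
 * FMA231/FNMADD patterns above, written with FMA3 intrinsics from <immintrin.h>.
 * Build with e.g. -mfma. Helper names are invented for this example. */
#include <immintrin.h>

/* FMA231-style update: acc = a*b + acc  (cf. "FMA231: src1 = src2*src3 + src1") */
static inline __m256d fma231_pd(__m256d acc, __m256d a, __m256d b) {
  return _mm256_fmadd_pd(a, b, acc);        /* a*b + acc */
}

/* FMSUB231-style update: acc = a*b - acc */
static inline __m256d fmsub231_pd(__m256d acc, __m256d a, __m256d b) {
  return _mm256_fmsub_pd(a, b, acc);        /* a*b - acc */
}

/* FNMADD231-style update: acc = -(a*b) + acc */
static inline __m256d fnmadd231_pd(__m256d acc, __m256d a, __m256d b) {
  return _mm256_fnmadd_pd(a, b, acc);       /* -(a*b) + acc */
}

/* Scalar form, matching the VFMADDSS pattern above: x = a*b + x */
static inline float fmadd_ss(float x, float a, float b) {
  __m128 r = _mm_fmadd_ss(_mm_set_ss(a), _mm_set_ss(b), _mm_set_ss(x));
  return _mm_cvtss_f32(r);
}

Keeping the accumulator in the tied $src1/$dst operand is what makes the 231 form attractive for the (fadd (fmul a, b), c) patterns: the running sum stays in place across iterations while the multiplicands change.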