From 645b8be38a71f1a5d2d87ba4a649b11ad7875744 Mon Sep 17 00:00:00 2001
From: Bruno Cardoso Lopes
Date: Sat, 3 Sep 2011 00:46:42 +0000
Subject: [PATCH] Teach X86FastISel to use AVX versions of instructions when
 possible

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@139062 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86FastISel.cpp | 44 ++++++++++++++++++++--------------
 1 file changed, 26 insertions(+), 18 deletions(-)

diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 452b215071f..457b74250c8 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -198,8 +198,8 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
     RC  = X86::GR64RegisterClass;
     break;
   case MVT::f32:
-    if (Subtarget->hasSSE1()) {
-      Opc = X86::MOVSSrm;
+    if (X86ScalarSSEf32) {
+      Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
       RC  = X86::FR32RegisterClass;
     } else {
       Opc = X86::LD_Fp32m;
@@ -207,8 +207,8 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
     }
     break;
   case MVT::f64:
-    if (Subtarget->hasSSE2()) {
-      Opc = X86::MOVSDrm;
+    if (X86ScalarSSEf64) {
+      Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
       RC  = X86::FR64RegisterClass;
     } else {
       Opc = X86::LD_Fp64m;
@@ -250,10 +250,12 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM) {
   case MVT::i32: Opc = X86::MOV32mr; break;
   case MVT::i64: Opc = X86::MOV64mr; break; // Must be in x86-64 mode.
   case MVT::f32:
-    Opc = Subtarget->hasSSE1() ? X86::MOVSSmr : X86::ST_Fp32m;
+    Opc = X86ScalarSSEf32 ?
+          (Subtarget->hasAVX() ? X86::VMOVSSmr : X86::MOVSSmr) : X86::ST_Fp32m;
     break;
   case MVT::f64:
-    Opc = Subtarget->hasSSE2() ? X86::MOVSDmr : X86::ST_Fp64m;
+    Opc = X86ScalarSSEf64 ?
+          (Subtarget->hasAVX() ? X86::VMOVSDmr : X86::MOVSDmr) : X86::ST_Fp64m;
     break;
   }
 
@@ -805,14 +807,20 @@ bool X86FastISel::X86SelectLoad(const Instruction *I) {
 }
 
 static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {
+  bool HasAVX = Subtarget->hasAVX();
+  bool X86ScalarSSEf32 = HasAVX || Subtarget->hasSSE1();
+  bool X86ScalarSSEf64 = HasAVX || Subtarget->hasSSE2();
+
   switch (VT.getSimpleVT().SimpleTy) {
   default:       return 0;
   case MVT::i8:  return X86::CMP8rr;
   case MVT::i16: return X86::CMP16rr;
   case MVT::i32: return X86::CMP32rr;
   case MVT::i64: return X86::CMP64rr;
-  case MVT::f32: return Subtarget->hasSSE1() ? X86::UCOMISSrr : 0;
-  case MVT::f64: return Subtarget->hasSSE2() ? X86::UCOMISDrr : 0;
+  case MVT::f32:
+    return X86ScalarSSEf32 ? (HasAVX ? X86::VUCOMISSrr : X86::UCOMISSrr) : 0;
+  case MVT::f64:
+    return X86ScalarSSEf64 ? (HasAVX ? X86::VUCOMISDrr : X86::UCOMISDrr) : 0;
   }
 }
 
@@ -1215,7 +1223,7 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
 
 bool X86FastISel::X86SelectFPExt(const Instruction *I) {
   // fpext from float to double.
-  if (Subtarget->hasSSE2() &&
+  if (X86ScalarSSEf64 &&
       I->getType()->isDoubleTy()) {
     const Value *V = I->getOperand(0);
     if (V->getType()->isFloatTy()) {
@@ -1234,7 +1242,7 @@ bool X86FastISel::X86SelectFPExt(const Instruction *I) {
 }
 
 bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
-  if (Subtarget->hasSSE2()) {
+  if (X86ScalarSSEf64) {
     if (I->getType()->isFloatTy()) {
       const Value *V = I->getOperand(0);
       if (V->getType()->isDoubleTy()) {
@@ -1976,8 +1984,8 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
     RC  = X86::GR64RegisterClass;
     break;
   case MVT::f32:
-    if (Subtarget->hasSSE1()) {
-      Opc = X86::MOVSSrm;
+    if (X86ScalarSSEf32) {
+      Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
       RC  = X86::FR32RegisterClass;
     } else {
       Opc = X86::LD_Fp32m;
@@ -1985,8 +1993,8 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
     }
     break;
   case MVT::f64:
-    if (Subtarget->hasSSE2()) {
-      Opc = X86::MOVSDrm;
+    if (X86ScalarSSEf64) {
+      Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
       RC  = X86::FR64RegisterClass;
     } else {
       Opc = X86::LD_Fp64m;
@@ -2081,8 +2089,8 @@ unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
   switch (VT.SimpleTy) {
   default: return false;
   case MVT::f32:
-    if (Subtarget->hasSSE1()) {
-      Opc = X86::FsFLD0SS;
+    if (X86ScalarSSEf32) {
+      Opc = Subtarget->hasAVX() ? X86::VFsFLD0SS : X86::FsFLD0SS;
       RC  = X86::FR32RegisterClass;
     } else {
       Opc = X86::LD_Fp032;
@@ -2090,8 +2098,8 @@ unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
     }
     break;
   case MVT::f64:
-    if (Subtarget->hasSSE2()) {
-      Opc = X86::FsFLD0SD;
+    if (X86ScalarSSEf64) {
+      Opc = Subtarget->hasAVX() ? X86::VFsFLD0SD : X86::FsFLD0SD;
       RC  = X86::FR64RegisterClass;
     } else {
       Opc = X86::LD_Fp064;
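
The patch applies a single pattern at every site: keep the x87 fallback when scalar SSE is unavailable, use the legacy SSE opcode when only SSE1/SSE2 are present, and pick the VEX-encoded AVX form once AVX is available (on AVX-capable hardware, mixing legacy-SSE and VEX encodings can incur transition penalties, which is one general reason to prefer the VEX forms). The sketch below distills that three-way selection into a standalone C++ program; Opcode, FeatureSet, and chooseF32LoadOpc are hypothetical names introduced only for illustration, not LLVM's API.

#include <cassert>
#include <iostream>

// Hypothetical stand-ins for LLVM's opcode enum and subtarget queries.
enum class Opcode { LD_Fp32m, MOVSSrm, VMOVSSrm };

struct FeatureSet {
  bool HasSSE1;
  bool HasAVX; // AVX implies the scalar SSE operations are usable.
};

// The same three-way choice the patch makes in X86FastEmitLoad for MVT::f32:
// x87 load when scalar SSE is unavailable, the legacy SSE opcode otherwise,
// and the VEX-encoded form when AVX is present.
Opcode chooseF32LoadOpc(const FeatureSet &ST) {
  bool ScalarSSEf32 = ST.HasAVX || ST.HasSSE1; // mirrors X86ScalarSSEf32
  if (!ScalarSSEf32)
    return Opcode::LD_Fp32m;                   // x87 fallback
  return ST.HasAVX ? Opcode::VMOVSSrm : Opcode::MOVSSrm;
}

int main() {
  assert(chooseF32LoadOpc({false, false}) == Opcode::LD_Fp32m);
  assert(chooseF32LoadOpc({true, false}) == Opcode::MOVSSrm);
  assert(chooseF32LoadOpc({false, true}) == Opcode::VMOVSSrm);
  std::cout << "AVX-aware opcode selection matches the patch's pattern\n";
}

The identical structure recurs in the diff for stores (VMOVSSmr/VMOVSDmr), compares (VUCOMISSrr/VUCOMISDrr), and float-zero materialization (VFsFLD0SS/VFsFLD0SD), which is why the guards switch from Subtarget->hasSSE1()/hasSSE2() to the X86ScalarSSEf32/X86ScalarSSEf64 predicates: those are true whenever either SSE or AVX provides the scalar operations.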