Make X86::FsFLD0SS / FsFLD0SD real pseudo-instructions.

Like V_SET0, these instructions are expanded by ExpandPostRA to xorps /
vxorps so they can participate in execution domain swizzling.

This also makes the AVX variants redundant.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@145440 91177308-0d34-0410-b5e6-96231b3b80d8
Jakob Stoklund Olesen 2011-11-29 22:27:25 +00:00
parent be4c844648
commit 0edd83bfff
12 changed files with 71 additions and 81 deletions
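
A note on "execution domain swizzling": on x86, xorps (packed-single domain), xorpd (packed-double domain), and pxor (packed-integer domain) all zero a register the same way, and the execution dependency fix pass may rewrite one into another to avoid domain-crossing stalls. It works by table lookup; a minimal sketch of the relevant row, paraphrased from the ReplaceableInstrs table in X86InstrInfo.cpp of this era (the MOVAPS row is shown only as an illustrative second entry):

  // Each row lists the equivalent opcode in each execution domain:
  // PackedSingle, PackedDouble, PackedInt.
  static const unsigned ReplaceableInstrs[][3] = {
    { X86::MOVAPSrr, X86::MOVAPDrr, X86::MOVDQArr },
    { X86::XORPSrr,  X86::XORPDrr,  X86::PXORrr   },
  };

Expanding FsFLD0SS / FsFLD0SD to a real XORPSrr before that pass runs is what lets the zero idiom participate; the old MCInst-time expansion (removed below) happened after the pass.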

View File

@@ -2121,7 +2121,7 @@ unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
   default: return false;
   case MVT::f32:
     if (X86ScalarSSEf32) {
-      Opc = Subtarget->hasAVX() ? X86::VFsFLD0SS : X86::FsFLD0SS;
+      Opc = X86::FsFLD0SS;
       RC = X86::FR32RegisterClass;
     } else {
       Opc = X86::LD_Fp032;
@@ -2130,7 +2130,7 @@ unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
     break;
   case MVT::f64:
     if (X86ScalarSSEf64) {
-      Opc = Subtarget->hasAVX() ? X86::VFsFLD0SD : X86::FsFLD0SD;
+      Opc = X86::FsFLD0SD;
       RC = X86::FR64RegisterClass;
     } else {
       Opc = X86::LD_Fp064;

View File

@@ -2556,6 +2556,8 @@ bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
   bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
   switch (MI->getOpcode()) {
   case X86::V_SET0:
+  case X86::FsFLD0SS:
+  case X86::FsFLD0SD:
     return Expand2AddrUndef(MI, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr));
   case X86::TEST8ri_NOREX:
     MI->setDesc(get(X86::TEST8ri));
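
Expand2AddrUndef, used above, is a small static helper in X86InstrInfo.cpp. A paraphrased sketch of what it does (the exact body in the tree may differ slightly):

  // Rewrite "%xmm0 = FsFLD0SS" into "%xmm0 = XORPSrr undef %xmm0, undef %xmm0".
  // The undef flags say no value is actually read, so this remains a
  // dependency-breaking zero idiom rather than a real use of %xmm0.
  static bool Expand2AddrUndef(MachineInstr *MI, const MCInstrDesc &Desc) {
    assert(Desc.getNumOperands() == 3 && "Expected two-address instruction.");
    unsigned Reg = MI->getOperand(0).getReg();
    MI->setDesc(Desc);
    MachineInstrBuilder(MI).addReg(Reg, RegState::Undef)
                           .addReg(Reg, RegState::Undef);
    return true;
  }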
@@ -2911,11 +2913,9 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
       Alignment = 16;
       break;
     case X86::FsFLD0SD:
-    case X86::VFsFLD0SD:
       Alignment = 8;
       break;
     case X86::FsFLD0SS:
-    case X86::VFsFLD0SS:
       Alignment = 4;
       break;
     default:
@@ -2950,9 +2950,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
     case X86::AVX_SETALLONES:
     case X86::AVX2_SETALLONES:
     case X86::FsFLD0SD:
-    case X86::FsFLD0SS:
-    case X86::VFsFLD0SD:
-    case X86::VFsFLD0SS: {
+    case X86::FsFLD0SS: {
       // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
       // Create a constant-pool entry and operands to load from it.
@@ -2978,9 +2976,9 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
     MachineConstantPool &MCP = *MF.getConstantPool();
     Type *Ty;
     unsigned Opc = LoadMI->getOpcode();
-    if (Opc == X86::FsFLD0SS || Opc == X86::VFsFLD0SS)
+    if (Opc == X86::FsFLD0SS)
       Ty = Type::getFloatTy(MF.getFunction()->getContext());
-    else if (Opc == X86::FsFLD0SD || Opc == X86::VFsFLD0SD)
+    else if (Opc == X86::FsFLD0SD)
       Ty = Type::getDoubleTy(MF.getFunction()->getContext());
     else if (Opc == X86::AVX_SET0PSY || Opc == X86::AVX_SET0PDY)
       Ty = VectorType::get(Type::getFloatTy(MF.getFunction()->getContext()), 8);
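
For the folding path above: once Ty is chosen, the code materializes the constant in the constant pool and synthesizes the five x86 address operands for a load from it. A hedged sketch of the continuation, paraphrased from the surrounding foldMemoryOperandImpl code (PICBase computation and the all-ones cases omitted):

  const Constant *C = Constant::getNullValue(Ty); // +0.0 for FsFLD0SS/SD
  unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);

  // x86 memory reference: base, scale, index, displacement, segment.
  SmallVector<MachineOperand,5> MOs;
  MOs.push_back(MachineOperand::CreateReg(PICBase, false));
  MOs.push_back(MachineOperand::CreateImm(1));
  MOs.push_back(MachineOperand::CreateReg(0, false));
  MOs.push_back(MachineOperand::CreateCPI(CPI, 0));
  MOs.push_back(MachineOperand::CreateReg(0, false));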

View File

@@ -473,6 +473,7 @@ def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">;
 def HasAVX : Predicate<"Subtarget->hasAVX()">;
 def HasAVX2 : Predicate<"Subtarget->hasAVX2()">;
 def HasXMM : Predicate<"Subtarget->hasXMM()">;
+def HasXMMInt : Predicate<"Subtarget->hasXMMInt()">;
 def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">;

View File

@@ -240,21 +240,13 @@ let Predicates = [HasAVX] in {
 }
 
 // Alias instructions that map fld0 to pxor for sse.
-// FIXME: Set encoding to pseudo!
-let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
-    canFoldAsLoad = 1 in {
-  def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
-                   [(set FR32:$dst, fp32imm0)]>,
-                   Requires<[HasSSE1]>, TB, OpSize;
-  def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
-                   [(set FR64:$dst, fpimm0)]>,
-                   Requires<[HasSSE2]>, TB, OpSize;
-  def VFsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
-                    [(set FR32:$dst, fp32imm0)]>,
-                    Requires<[HasAVX]>, TB, OpSize, VEX_4V;
-  def VFsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
-                    [(set FR64:$dst, fpimm0)]>,
-                    Requires<[HasAVX]>, TB, OpSize, VEX_4V;
+// This is expanded by ExpandPostRAPseudos.
+let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
+    isPseudo = 1 in {
+  def FsFLD0SS : I<0, Pseudo, (outs FR32:$dst), (ins), "",
+                   [(set FR32:$dst, fp32imm0)]>, Requires<[HasXMM]>;
+  def FsFLD0SD : I<0, Pseudo, (outs FR64:$dst), (ins), "",
+                   [(set FR64:$dst, fpimm0)]>, Requires<[HasXMMInt]>;
 }
 
 //===----------------------------------------------------------------------===//

View File

@@ -368,10 +368,6 @@ ReSimplify:
   case X86::SETB_C64r: LowerUnaryToTwoAddr(OutMI, X86::SBB64rr); break;
   case X86::MOV8r0: LowerUnaryToTwoAddr(OutMI, X86::XOR8rr); break;
   case X86::MOV32r0: LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); break;
-  case X86::FsFLD0SS: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break;
-  case X86::FsFLD0SD: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break;
-  case X86::VFsFLD0SS: LowerUnaryToTwoAddr(OutMI, X86::VPXORrr); break;
-  case X86::VFsFLD0SD: LowerUnaryToTwoAddr(OutMI, X86::VPXORrr); break;
   case X86::V_SETALLONES: LowerUnaryToTwoAddr(OutMI, X86::PCMPEQDrr); break;
   case X86::AVX_SET0PSY: LowerUnaryToTwoAddr(OutMI, X86::VXORPSYrr); break;
   case X86::AVX_SET0PDY: LowerUnaryToTwoAddr(OutMI, X86::VXORPDYrr); break;
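
These four cases are now dead code: a pseudo-instruction is expanded by ExpandPostRAPseudos and never reaches MCInst lowering. Expanding here was also too late for domain swizzling, since the execution dependency pass runs on MachineInstrs. For reference, a paraphrased sketch of the helper the deleted cases called:

  // Turn a single-operand MCInst into a two-address self-operation,
  // e.g. a lone %xmm0 destination becomes "pxor %xmm0, %xmm0".
  static void LowerUnaryToTwoAddr(MCInst &OutMI, unsigned NewOpc) {
    OutMI.setOpcode(NewOpc);
    OutMI.addOperand(OutMI.getOperand(0));
    OutMI.addOperand(OutMI.getOperand(0));
  }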

View File

@@ -1,5 +1,5 @@
-; RUN: llc < %s -mcpu=core2 | grep pxor | count 2
-; RUN: llc < %s -mcpu=core2 | not grep movapd
+; RUN: llc < %s -mcpu=core2 | grep xorps | count 2
+; RUN: llc < %s -mcpu=core2 | not grep movap
 ; PR2715
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"

View File

@@ -11,7 +11,7 @@ entry:
 
 define float @extractFloat2() nounwind {
 entry:
-; CHECK: pxor %xmm0, %xmm0
+; CHECK: xorps %xmm0, %xmm0
  %tmp4 = bitcast <1 x double> <double 0x000000003F800000> to <2 x float>
  %tmp5 = extractelement <2 x float> %tmp4, i32 1
  ret float %tmp5

View File

@@ -225,18 +225,20 @@ if.else: ; preds = %entry
 ; CHECK-NEXT: je
 }
 
-; Check that 0.0 is materialized using pxor
+; Check that 0.0 is materialized using xorps
 define void @test18(float* %p1) {
   store float 0.0, float* %p1
   ret void
 ; CHECK: test18:
-; CHECK: pxor
+; CHECK: xorps
 }
+
+; Without any type hints, doubles use the smaller xorps instead of xorpd.
 define void @test19(double* %p1) {
   store double 0.0, double* %p1
   ret void
 ; CHECK: test19:
-; CHECK: pxor
+; CHECK: xorps
 }
 
 ; Check that we fast-isel sret
@@ -252,12 +254,12 @@ entry:
 }
 declare void @test20sret(%struct.a* sret)
 
-; Check that -0.0 is not materialized using pxor
+; Check that -0.0 is not materialized using xor
 define void @test21(double* %p1) {
   store double -0.0, double* %p1
   ret void
 ; CHECK: test21:
-; CHECK-NOT: pxor
+; CHECK-NOT: xor
 ; CHECK: movsd LCPI
 }

View File

@@ -1,4 +1,4 @@
-; RUN: llc < %s -march=x86-64 | grep {pxor %xmm0, %xmm0} | count 2
+; RUN: llc < %s -march=x86-64 | grep {xorps %xmm0, %xmm0} | count 2
 
 define float @foo(<4 x float> %a) {
   %b = insertelement <4 x float> %a, float 0.0, i32 3

View File

@@ -45,6 +45,7 @@ while.end:
 }
 
 ; CHECK: f2
 ; CHECK: for.body
+;
 ; This loop contains two cvtsi2ss instructions that update the same xmm
 ; register. Verify that the execution dependency fix pass breaks those

View File

@@ -140,15 +140,15 @@ define double @ole_inverse(double %x, double %y) nounwind {
 }
 
 ; CHECK: x_ogt:
-; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; CHECK-NEXT: maxsd %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE: x_ogt:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; UNSAFE-NEXT: maxsd %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE: x_ogt:
-; FINITE-NEXT: pxor %xmm1, %xmm1
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; FINITE-NEXT: maxsd %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @x_ogt(double %x) nounwind {
@@ -158,15 +158,15 @@ define double @x_ogt(double %x) nounwind {
 }
 
 ; CHECK: x_olt:
-; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; CHECK-NEXT: minsd %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE: x_olt:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; UNSAFE-NEXT: minsd %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE: x_olt:
-; FINITE-NEXT: pxor %xmm1, %xmm1
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; FINITE-NEXT: minsd %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @x_olt(double %x) nounwind {
@@ -176,17 +176,17 @@ define double @x_olt(double %x) nounwind {
 }
 
 ; CHECK: x_ogt_inverse:
-; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; CHECK-NEXT: minsd %xmm0, %xmm1
 ; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE: x_ogt_inverse:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; UNSAFE-NEXT: minsd %xmm0, %xmm1
 ; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE: x_ogt_inverse:
-; FINITE-NEXT: pxor %xmm1, %xmm1
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; FINITE-NEXT: minsd %xmm0, %xmm1
 ; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
@@ -197,17 +197,17 @@ define double @x_ogt_inverse(double %x) nounwind {
 }
 
 ; CHECK: x_olt_inverse:
-; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; CHECK-NEXT: maxsd %xmm0, %xmm1
 ; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE: x_olt_inverse:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; UNSAFE-NEXT: maxsd %xmm0, %xmm1
 ; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE: x_olt_inverse:
-; FINITE-NEXT: pxor %xmm1, %xmm1
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; FINITE-NEXT: maxsd %xmm0, %xmm1
 ; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
@@ -220,11 +220,11 @@ define double @x_olt_inverse(double %x) nounwind {
 ; CHECK: x_oge:
 ; CHECK: ucomisd %xmm1, %xmm0
 ; UNSAFE: x_oge:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; UNSAFE-NEXT: maxsd %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE: x_oge:
-; FINITE-NEXT: pxor %xmm1, %xmm1
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; FINITE-NEXT: maxsd %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @x_oge(double %x) nounwind {
@@ -236,11 +236,11 @@ define double @x_oge(double %x) nounwind {
 ; CHECK: x_ole:
 ; CHECK: ucomisd %xmm0, %xmm1
 ; UNSAFE: x_ole:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; UNSAFE-NEXT: minsd %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE: x_ole:
-; FINITE-NEXT: pxor %xmm1, %xmm1
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; FINITE-NEXT: minsd %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @x_ole(double %x) nounwind {
@@ -252,12 +252,12 @@ define double @x_ole(double %x) nounwind {
 ; CHECK: x_oge_inverse:
 ; CHECK: ucomisd %xmm1, %xmm0
 ; UNSAFE: x_oge_inverse:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; UNSAFE-NEXT: minsd %xmm0, %xmm1
 ; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE: x_oge_inverse:
-; FINITE-NEXT: pxor %xmm1, %xmm1
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; FINITE-NEXT: minsd %xmm0, %xmm1
 ; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
@@ -270,12 +270,12 @@ define double @x_oge_inverse(double %x) nounwind {
 ; CHECK: x_ole_inverse:
 ; CHECK: ucomisd %xmm0, %xmm1
 ; UNSAFE: x_ole_inverse:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; UNSAFE-NEXT: maxsd %xmm0, %xmm1
 ; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE: x_ole_inverse:
-; FINITE-NEXT: pxor %xmm1, %xmm1
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; FINITE-NEXT: maxsd %xmm0, %xmm1
 ; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
@@ -414,11 +414,11 @@ define double @ule_inverse(double %x, double %y) nounwind {
 ; CHECK: x_ugt:
 ; CHECK: ucomisd %xmm0, %xmm1
 ; UNSAFE: x_ugt:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; UNSAFE-NEXT: maxsd %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE: x_ugt:
-; FINITE-NEXT: pxor %xmm1, %xmm1
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; FINITE-NEXT: maxsd %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @x_ugt(double %x) nounwind {
@@ -430,11 +430,11 @@ define double @x_ugt(double %x) nounwind {
 ; CHECK: x_ult:
 ; CHECK: ucomisd %xmm1, %xmm0
 ; UNSAFE: x_ult:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; UNSAFE-NEXT: minsd %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE: x_ult:
-; FINITE-NEXT: pxor %xmm1, %xmm1
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; FINITE-NEXT: minsd %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @x_ult(double %x) nounwind {
@@ -446,12 +446,12 @@ define double @x_ult(double %x) nounwind {
 ; CHECK: x_ugt_inverse:
 ; CHECK: ucomisd %xmm0, %xmm1
 ; UNSAFE: x_ugt_inverse:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; UNSAFE-NEXT: minsd %xmm0, %xmm1
 ; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE: x_ugt_inverse:
-; FINITE-NEXT: pxor %xmm1, %xmm1
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; FINITE-NEXT: minsd %xmm0, %xmm1
 ; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
@@ -464,12 +464,12 @@ define double @x_ugt_inverse(double %x) nounwind {
 ; CHECK: x_ult_inverse:
 ; CHECK: ucomisd %xmm1, %xmm0
 ; UNSAFE: x_ult_inverse:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; UNSAFE-NEXT: maxsd %xmm0, %xmm1
 ; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE: x_ult_inverse:
-; FINITE-NEXT: pxor %xmm1, %xmm1
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; FINITE-NEXT: maxsd %xmm0, %xmm1
 ; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
@@ -480,16 +480,16 @@ define double @x_ult_inverse(double %x) nounwind {
 }
 
 ; CHECK: x_uge:
-; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; CHECK-NEXT: maxsd %xmm0, %xmm1
 ; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE: x_uge:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; UNSAFE-NEXT: maxsd %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE: x_uge:
-; FINITE-NEXT: pxor %xmm1, %xmm1
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; FINITE-NEXT: maxsd %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @x_uge(double %x) nounwind {
@@ -499,16 +499,16 @@ define double @x_uge(double %x) nounwind {
 }
 
 ; CHECK: x_ule:
-; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; CHECK-NEXT: minsd %xmm0, %xmm1
 ; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE: x_ule:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; UNSAFE-NEXT: minsd %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE: x_ule:
-; FINITE-NEXT: pxor %xmm1, %xmm1
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; FINITE-NEXT: minsd %xmm1, %xmm0
 ; FINITE-NEXT: ret
 define double @x_ule(double %x) nounwind {
@@ -518,16 +518,16 @@ define double @x_ule(double %x) nounwind {
 }
 
 ; CHECK: x_uge_inverse:
-; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; CHECK-NEXT: minsd %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE: x_uge_inverse:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; UNSAFE-NEXT: minsd %xmm0, %xmm1
 ; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE: x_uge_inverse:
-; FINITE-NEXT: pxor %xmm1, %xmm1
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; FINITE-NEXT: minsd %xmm0, %xmm1
 ; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret
@@ -538,16 +538,16 @@ define double @x_uge_inverse(double %x) nounwind {
 }
 
 ; CHECK: x_ule_inverse:
-; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; CHECK-NEXT: maxsd %xmm1, %xmm0
 ; CHECK-NEXT: ret
 ; UNSAFE: x_ule_inverse:
-; UNSAFE-NEXT: pxor %xmm1, %xmm1
+; UNSAFE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; UNSAFE-NEXT: maxsd %xmm0, %xmm1
 ; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; UNSAFE-NEXT: ret
 ; FINITE: x_ule_inverse:
-; FINITE-NEXT: pxor %xmm1, %xmm1
+; FINITE-NEXT: xorp{{[sd]}} %xmm1, %xmm1
 ; FINITE-NEXT: maxsd %xmm0, %xmm1
 ; FINITE-NEXT: movap{{[sd]}} %xmm1, %xmm0
 ; FINITE-NEXT: ret

View File

@@ -16,9 +16,9 @@ define double @foo() nounwind {
 ;CHECK-32: ret
 
 ;CHECK-64: foo:
-;CHECK-64: pxor
+;CHECK-64: xorps
 ;CHECK-64: call
-;CHECK-64: pxor
+;CHECK-64: xorps
 ;CHECK-64: ret
 }
 
@@ -33,8 +33,8 @@ define float @foof() nounwind {
 ;CHECK-32: ret
 
 ;CHECK-64: foof:
-;CHECK-64: pxor
+;CHECK-64: xorps
 ;CHECK-64: call
-;CHECK-64: pxor
+;CHECK-64: xorps
 ;CHECK-64: ret
 }