Use sdmem and sse_load_f64 (etc.) for the vector
form of CMPSD (etc.). Matching a 128-bit memory
operand is wrong; the instruction reads only 64 bits
(same as ADDSD etc.). 8193553.



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@110491 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Dale Johannesen 2010-08-07 00:33:42 +00:00
parent 5015b3417f
commit 7f6eb639bd
2 changed files with 40 additions and 8 deletions

View File

@@ -1163,31 +1163,36 @@ let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
"cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}">, XD;
}
// NOTE(review): this span is a rendered diff — removed and added lines appear
// back to back with no +/- markers. The change: the multiclass used to take a
// raw X86MemOperand (f32mem/f64mem, which can match a 128-bit load); it now
// takes a scalar-width Operand plus a ComplexPattern so the intrinsic memory
// form matches only the 32/64-bit scalar load (ssmem+sse_load_f32,
// sdmem+sse_load_f64), per the commit message.
// Old signature (removed):
multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
Intrinsic Int, string asm> {
// New signature (added):
multiclass sse12_cmp_scalar_int<RegisterClass RC, Operand memopr,
ComplexPattern mem_cpat, Intrinsic Int, string asm> {
// Register-register form: intrinsic compare of $src1 with $src, predicate
// selected by the SSECC immediate $cc, full XMM result in $dst.
def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
[(set VR128:$dst, (Int VR128:$src1,
VR128:$src, imm:$cc))]>;
// Register-memory form. Old ins/pattern used f32mem + a plain (load ...),
// which could fold a wider load; new lines use the memopr operand and the
// mem_cpat ComplexPattern so only a scalar-sized load is folded.
def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
// old (removed):
(ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
// new (added):
(ins VR128:$src1, memopr:$src, SSECC:$cc), asm,
[(set VR128:$dst, (Int VR128:$src1,
// old pattern operand (removed): (load addr:$src); new (added): mem_cpat:$src
(load addr:$src), imm:$cc))]>;
mem_cpat:$src, imm:$cc))]>;
}
// Aliases to match intrinsics which expect XMM operand(s).
// NOTE(review): rendered diff — each old defm line (f32mem/f64mem +
// intrinsic) is immediately followed by its replacement, which passes the
// scalar-width operand and ComplexPattern (ssmem+sse_load_f32 for CMPSS,
// sdmem+sse_load_f64 for CMPSD) to the updated multiclass.
let isAsmParserOnly = 1 in {
// old (removed):
defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
// new (added):
defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, ssmem, sse_load_f32,
int_x86_sse_cmp_ss,
"cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
XS, VEX_4V;
// old (removed):
defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
// new (added):
defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, sdmem, sse_load_f64,
int_x86_sse2_cmp_sd,
"cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
XD, VEX_4V;
}
// Non-VEX (SSE) intrinsic forms; two-operand encoding, so $src1 is tied to
// $dst. NOTE(review): rendered diff — old defm lines (f32mem/f64mem) are
// immediately followed by the replacements using ssmem+sse_load_f32 and
// sdmem+sse_load_f64, matching the VEX block above.
let Constraints = "$src1 = $dst" in {
// old (removed):
defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
// new (added):
defm Int_CMPSS : sse12_cmp_scalar_int<VR128, ssmem, sse_load_f32,
int_x86_sse_cmp_ss,
"cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
// old (removed):
defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
// new (added):
defm Int_CMPSD : sse12_cmp_scalar_int<VR128, sdmem, sse_load_f64,
int_x86_sse2_cmp_sd,
"cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
}

View File

@@ -0,0 +1,27 @@
; RUN: llc < %s -mtriple=x86_64-applecl-darwin11 | FileCheck %s
; 8193553
; Regression test for the CMPSD intrinsic memory form: cmpsd reads only
; 64 bits of memory, so the compiler must not fold the 128-bit vector load
; from (%rsi) into the compare. The CHECK-NOT below asserts that no
; memory-operand "cmpordsd (%rsi)" is emitted (cc 7 = ord predicate).
define void @__math_kernel_Vectorized_wrapper(<4 x double> addrspace(1)* %a, <4 x double> addrspace(1)* %b, i64 addrspace(1)* %c, i64 addrspace(1)* %d) nounwind {
entry.i: ; preds = %entry.i, %loop
; CHECK: math_kernel_Vectorized_wrapper
; CHECK-NOT: cmpordsd (%rsi),
; Dead allocas retained from the original reduction; not otherwise used.
%0 = alloca i8
%1 = alloca i8
%2 = alloca i8
; Load a full <4 x double> from each input, then extract element 1 of each —
; only one scalar of each 256-bit load feeds the compare.
%tmp213.i = load <4 x double> addrspace(1)* %a ; <<4 x double>> [#uses=4]
%extract25.i = extractelement <4 x double> %tmp213.i, i32 1 ; <double> [#uses=1]
%tmp723.i = load <4 x double> addrspace(1)* %b ; <<4 x double>> [#uses=4]
%extract29.i = extractelement <4 x double> %tmp723.i, i32 1 ; <double> [#uses=1]
; Rebuild <2 x double> operands (lane 0 of the first, lane 1 of the second)
; and invoke the cmp.sd intrinsic with predicate 7 (ordered).
%tmp2.i26 = insertelement <2 x double> undef, double %extract25.i, i32 0 ; <<2 x double>> [#uses=1]
%tmp5.i27 = insertelement <2 x double> undef, double %extract29.i, i32 1 ; <<2 x double>> [#uses=1]
%cmpsd.i.i28 = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %tmp2.i26, <2 x double> %tmp5.i27, i8 7) nounwind ; <<2 x double>> [#uses=1]
; Reduce the mask to bit 0, widen to i64, and store so the compare is live.
%3 = bitcast <2 x double> %cmpsd.i.i28 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp12.i29 = extractelement <4 x i32> %3, i32 0 ; <i32> [#uses=1]
%and.i30 = and i32 %tmp12.i29, 1 ; <i32> [#uses=1]
%conv937.i36 = zext i32 %and.i30 to i64 ; <i64> [#uses=1]
store i64 %conv937.i36, i64 addrspace(1)* %d
ret void
; CHECK: ret
}
declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8) nounwind readnone