diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index ac4f206c41d..9b8ff72dded 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -6492,11 +6492,13 @@ static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
   //   AND(VECTOR_CMP(x,y), constant2)
   // constant2 = UNARYOP(constant)
 
-  // Early exit if this isn't a vector operation or if the operand of the
-  // unary operation isn't a bitwise AND.
+  // Early exit if this isn't a vector operation, the operand of the
+  // unary operation isn't a bitwise AND, or if the sizes of the operations
+  // aren't the same.
   EVT VT = N->getValueType(0);
   if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
-      N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC)
+      N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
+      VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
     return SDValue();
 
   // Now check that the other operand of the AND is a constant splat. We could
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index cba145208da..95666a47da8 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -21806,11 +21806,13 @@ static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
   //   AND(VECTOR_CMP(x,y), constant2)
   // constant2 = UNARYOP(constant)
 
-  // Early exit if this isn't a vector operation or if the operand of the
-  // unary operation isn't a bitwise AND.
+  // Early exit if this isn't a vector operation, the operand of the
+  // unary operation isn't a bitwise AND, or if the sizes of the operations
+  // aren't the same.
   EVT VT = N->getValueType(0);
   if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
-      N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC)
+      N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
+      VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
     return SDValue();
 
   // Now check that the other operand of the AND is a constant splat. We could
diff --git a/test/CodeGen/AArch64/arm64-setcc-int-to-fp-combine.ll b/test/CodeGen/AArch64/arm64-setcc-int-to-fp-combine.ll
index b10fe758d95..045c9cd9aeb 100644
--- a/test/CodeGen/AArch64/arm64-setcc-int-to-fp-combine.ll
+++ b/test/CodeGen/AArch64/arm64-setcc-int-to-fp-combine.ll
@@ -11,3 +11,17 @@ define <4 x float> @foo(<4 x float> %val, <4 x float> %test) nounwind {
   %result = sitofp <4 x i32> %ext to <4 x float>
   ret <4 x float> %result
 }
+; Make sure the operation doesn't try to get folded when the sizes don't match,
+; as that ends up crashing later when trying to form a bitcast operation for
+; the folded nodes.
+define void @foo1(<4 x float> %val, <4 x float> %test, <4 x double>* %p) nounwind {
+; CHECK-LABEL: foo1:
+; CHECK: movi.4s
+; CHECK: scvtf.2d
+; CHECK: scvtf.2d
+  %cmp = fcmp oeq <4 x float> %val, %test
+  %ext = zext <4 x i1> %cmp to <4 x i32>
+  %result = sitofp <4 x i32> %ext to <4 x double>
+  store <4 x double> %result, <4 x double>* %p
+  ret void
+}
diff --git a/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll b/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll
index 86c4eb7c49e..f737519bd15 100644
--- a/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll
+++ b/test/CodeGen/X86/x86-setcc-int-to-fp-combine.ll
@@ -17,14 +17,38 @@ define <4 x float> @foo(<4 x float> %val, <4 x float> %test) nounwind {
   ret <4 x float> %result
 }
 
-define void @bar(<4 x float>* noalias %result) nounwind {
+; Make sure the operation doesn't try to get folded when the sizes don't match,
+; as that ends up crashing later when trying to form a bitcast operation for
+; the folded nodes.
+define void @foo1(<4 x float> %val, <4 x float> %test, <4 x double>* %p) nounwind {
 ; CHECK-LABEL: LCPI1_0:
+; CHECK-NEXT: .long 1 ## 0x1
+; CHECK-NEXT: .long 1 ## 0x1
+; CHECK-NEXT: .long 1 ## 0x1
+; CHECK-NEXT: .long 1 ## 0x1
+; CHECK-LABEL: foo1:
+; FIXME: The operation gets scalarized. If/when the compiler learns to better
+; use [V]CVTDQ2PD, this will need updating.
+; CHECK: cvtsi2sdq
+; CHECK: cvtsi2sdq
+; CHECK: cvtsi2sdq
+; CHECK: cvtsi2sdq
+  %cmp = fcmp oeq <4 x float> %val, %test
+  %ext = zext <4 x i1> %cmp to <4 x i32>
+  %result = sitofp <4 x i32> %ext to <4 x double>
+  store <4 x double> %result, <4 x double>* %p
+  ret void
+}
+
+; Also test the general purpose constant folding of int->fp.
+define void @foo2(<4 x float>* noalias %result) nounwind {
+; CHECK-LABEL: LCPI2_0:
 ; CHECK-NEXT: .long 1082130432 ## float 4.000000e+00
 ; CHECK-NEXT: .long 1084227584 ## float 5.000000e+00
 ; CHECK-NEXT: .long 1086324736 ## float 6.000000e+00
 ; CHECK-NEXT: .long 1088421888 ## float 7.000000e+00
-; CHECK-LABEL: bar:
-; CHECK: movaps LCPI1_0(%rip), %xmm0
+; CHECK-LABEL: foo2:
+; CHECK: movaps LCPI2_0(%rip), %xmm0
   %val = uitofp <4 x i32> <i32 4, i32 5, i32 6, i32 7> to <4 x float>
   store <4 x float> %val, <4 x float>* %result
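
Note for reviewers, not part of the patch: below is a standalone reproducer distilled from the new @foo1 tests; the RUN line, triple, and function name are my own choices. Before this change the combine built constant2 = SINT_TO_FP(constant) with the result type v4f64 (256 bits) and then tried to BITCAST it back to the AND's type v4i32 (128 bits), which trips the BITCAST size assertion in SelectionDAG::getNode ("Cannot BITCAST between types of different sizes!"). The new getSizeInBits() guard simply declines the fold when the types differ in width.

; RUN: llc < %s -mtriple=x86_64-apple-darwin
; Crashed llc before this patch; afterwards it compiles (scalarized on x86,
; per the FIXME in the test above).
define <4 x double> @repro(<4 x float> %val, <4 x float> %test) nounwind {
  %cmp = fcmp oeq <4 x float> %val, %test
  %ext = zext <4 x i1> %cmp to <4 x i32>
  %result = sitofp <4 x i32> %ext to <4 x double>
  ret <4 x double> %result
}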