diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 35127eb84db..9fa7bb1d3cc 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -217,10 +217,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
   if (!X86ScalarSSEf64) {
     setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand);
     setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand);
-    if (Subtarget->is64Bit()) {
-      setOperationAction(ISD::BIT_CONVERT , MVT::f64 , Expand);
-      setOperationAction(ISD::BIT_CONVERT , MVT::i64 , Expand);
-    }
   }
 
   // Scalar integer divide and remainder are lowered to use operations that
diff --git a/test/CodeGen/X86/2010-05-16-nosseconversion.ll b/test/CodeGen/X86/2010-05-16-nosseconversion.ll
index 266681cd169..e69de29bb2d 100644
--- a/test/CodeGen/X86/2010-05-16-nosseconversion.ll
+++ b/test/CodeGen/X86/2010-05-16-nosseconversion.ll
@@ -1,11 +0,0 @@
-; RUN: llc -mtriple=x86_64-apple-darwin -mattr=-sse < %s
-
-@x = common global i64 0		; [#uses=1]
-
-define i32 @foo() nounwind readonly ssp {
-entry:
-  %0 = load i64* @x, align 8		; [#uses=1]
-  %1 = uitofp i64 %0 to double		; [#uses=1]
-  %2 = fptosi double %1 to i32		; [#uses=1]
-  ret i32 %2
-}