Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2024-11-06 05:06:45 +00:00
a06ec9efe1
optimize it to a SINT_TO_FP when the sign bit is known zero. X86 isel should perform the optimization itself.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@62504 91177308-0d34-0410-b5e6-96231b3b80d8
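For context, the fold this commit describes can be pictured at the IR level: when the operand of a uitofp is provably non-negative (its sign bit is known zero), the conversion produces the same value as a sitofp, and the signed form maps directly to cvtsi2ss on x86 SSE. The sketch below is illustrative only; the function names are made up for this example, and the actual optimization operates on UINT_TO_FP/SINT_TO_FP SelectionDAG nodes, not on IR text.

; Illustrative sketch, not part of the mirrored file.
; After the shift, %small is in [0, 511], so its sign bit is zero and the
; unsigned and signed conversions compute the same float value.
define float @uitofp_known_nonneg(i32 %x) nounwind {
entry:
  %small = lshr i32 %x, 23
  %f = uitofp i32 %small to float     ; safe to treat as signed ...
  ret float %f
}

define float @sitofp_equivalent(i32 %x) nounwind {
entry:
  %small = lshr i32 %x, 23
  %f = sitofp i32 %small to float     ; ... which x86 selects as cvtsi2ss
  ret float %f
}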
15 lines · 514 B · LLVM
; RUN: llvm-as < %s | llc -march=x86 -mcpu=yonah | not grep {sub.*esp}
; RUN: llvm-as < %s | llc -march=x86 -mcpu=yonah | grep cvtsi2ss
; rdar://6034396

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin8"

define void @test(i32 %x, float* %y) nounwind {
entry:
	lshr i32 %x, 23		; <i32>:0 [#uses=1]
	uitofp i32 %0 to float		; <float>:1 [#uses=1]
	store float %1, float* %y
	ret void
}
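As a side note, newer LLVM trees usually express this kind of check with FileCheck rather than piping llc output through grep. The following is a hedged sketch of how a modernized variant of this test might look; it is not part of the mirrored file, and it assumes an llc that accepts -mtriple/-mcpu and a FileCheck binary on the path. The CHECK-NOT only constrains the region between the matched label and the cvtsi2ss match, so it is a slightly weaker check than the original `not grep {sub.*esp}` over the whole output.

; Sketch of a FileCheck-based variant (assumption), not the mirrored test.
; RUN: llc < %s -mtriple=i386-apple-darwin8 -mcpu=yonah | FileCheck %s
; rdar://6034396

define void @test(i32 %x, float* %y) nounwind {
; CHECK-LABEL: test:
; CHECK-NOT:   {{sub.*esp}}
; CHECK:       cvtsi2ss
entry:
  %0 = lshr i32 %x, 23
  %1 = uitofp i32 %0 to float
  store float %1, float* %y
  ret void
}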