they are produced by calls (which are known exact) and by cross block
copies which are known to be produced by extends. This improves:

define double @test2() {
        %tmp85 = call double asm sideeffect "fld0", "={st(0)}"()
        ret double %tmp85
}

from:

_test2:
        subl    $20, %esp
        # InlineAsm Start
        fld0
        # InlineAsm End
        fstpl   8(%esp)
        movsd   8(%esp), %xmm0
        movsd   %xmm0, (%esp)
        fldl    (%esp)
        addl    $20, %esp
        #FP_REG_KILL
        ret

to:

_test2:
        # InlineAsm Start
        fld0
        # InlineAsm End
        #FP_REG_KILL
        ret

by avoiding a f64 <-> f80 trip

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@48108 91177308-0d34-0410-b5e6-96231b3b80d8
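
For reference, a hypothetical C-level sketch (not part of the commit or its
test suite) of inline asm that lowers to the "={st(0)}" constraint in the IR
above. It uses GCC/Clang's "=t" output constraint, which binds the result to
the top of the x87 stack (st(0)), and substitutes the real instruction fldz
(load +0.0) for the test's placeholder string "fld0" so the example assembles:

        /* Hypothetical analogue of the @test2 IR above: "=t" pins the
         * asm's double result to st(0), the top of the x87 stack.
         * "fldz" stands in for the placeholder "fld0". */
        double test2(void) {
            double result;
            __asm__ volatile ("fldz" : "=t"(result));
            return result;
        }

With the change described above, a value produced this way can stay on the
x87 stack when returned as a double, instead of round-tripping through memory
between f64 and f80.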