; RUN: llvm-as < %s | llc | grep {subfc r3,r5,r4}
; RUN: llvm-as < %s | llc | grep {subfze r4,r2}
; RUN: llvm-as < %s | llc -regalloc=local | grep {subfc r5,r2,r4}
; RUN: llvm-as < %s | llc -regalloc=local | grep {subfze r2,r3}
; The first argument of subfc must not be the same as any other register.
; PR1357
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64"
target triple = "powerpc-apple-darwin8.8.0"
;long long test(int A, int B, int C) {
; unsigned X, Y;
; __asm__ ("subf%I3c %1,%4,%3\n\tsubfze %0,%2"
; : "=r" (X), "=&r" (Y)
; : "r" (A), "rI" (B), "r" (C));
; return ((long long)Y << 32) | X;
;}
Fix PR3149. If an early clobber def is a physical register and it is tied to an input operand, it effectively extends the live range of the physical register. Currently we do not have a good way to represent this.
172 %ECX<def> = MOV32rr %reg1039<kill>
180 INLINEASM <es:subl $5,$1
                  sbbl $3,$0>, 10, %EAX<def>, 14, %ECX<earlyclobber,def>, 9, %EAX<kill>,
                  36, <fi#0>, 1, %reg0, 0, 9, %ECX<kill>, 36, <fi#1>, 1, %reg0, 0
188 %EAX<def> = MOV32rr %EAX<kill>
196 %ECX<def> = MOV32rr %ECX<kill>
204 %ECX<def> = MOV32rr %ECX<kill>
212 %EAX<def> = MOV32rr %EAX<kill>
220 %EAX<def> = MOV32rr %EAX
228 %reg1039<def> = MOV32rr %ECX<kill>
The early clobber operand ties the ECX input to the ECX def.
The live interval of ECX is currently represented as:
%reg20,inf = [46,47:1)[174,230:0) 0@174-(230) 1@46-(47)
The right way to represent this is something like:
%reg20,inf = [46,47:2)[174,182:1)[181,230:0) 0@174-(182) 1@181-(230) 2@46-(47)
Of course that won't work, since it would mean overlapping live ranges defined by two val#s.
The workaround for now is to add a bit to the val# which says the val# is redefined by an early clobber def somewhere. This prevents the move at 228 from being optimized away by SimpleRegisterCoalescing::AdjustCopiesBackFrom.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@61259 91177308-0d34-0410-b5e6-96231b3b80d8
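For context, a minimal C-level sketch of the kind of inline asm that produces the dump above (a hypothetical reproduction, not the actual PR3149 source): the second output is an early clobber pinned to ECX and is tied to one of the inputs, so ECX is effectively live from before the asm until the copy out of it afterwards (the move at 228), and that copy must not be coalesced away.

    /* Hypothetical sketch, assuming GCC-style inline asm: output 1 ("=&c") is
       an early clobber fixed to %ecx and tied to input operand 4 ("1"), which
       is the situation described above. */
    unsigned long long sub64(unsigned a_hi, unsigned a_lo,
                             unsigned b_hi, unsigned b_lo) {
      unsigned hi, lo;
      __asm__("subl %5,%1\n\tsbbl %3,%0"   /* low halves first, then high with borrow  */
              : "=r"(hi), "=&c"(lo)        /* "&" = early clobber, "c" = fixed to %ecx */
              : "0"(a_hi), "rm"(b_hi),     /* "0"/"1" tie these inputs to the outputs  */
                "1"(a_lo), "rm"(b_lo));
      return ((unsigned long long)hi << 32) | lo;
    }

The early clobber keeps the other inputs out of ECX, while the matching constraints express the read-modify-write halves; it is that combination of fixed physical register, early clobber, and tied input that the val# bit above works around.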
define i64 @test(i32 %A, i32 %B, i32 %C) nounwind {
entry:
%Y = alloca i32, align 4 ; <i32*> [#uses=2]
%tmp4 = call i32 asm "subf${3:I}c $1,$4,$3\0A\09subfze $0,$2", "=r,=*&r,r,rI,r"( i32* %Y, i32 %A, i32 %B, i32 %C ) ; <i32> [#uses=1]
%tmp5 = load i32* %Y ; <i32> [#uses=1]
%tmp56 = zext i32 %tmp5 to i64 ; <i64> [#uses=1]
%tmp7 = shl i64 %tmp56, 32 ; <i64> [#uses=1]
%tmp89 = zext i32 %tmp4 to i64 ; <i64> [#uses=1]
%tmp10 = or i64 %tmp7, %tmp89 ; <i64> [#uses=1]
ret i64 %tmp10
}