Mirror of https://github.com/c64scene-ar/llvm-6502.git
Commit 36e1825e68
This change enables tracking i1 values in the PowerPC backend using the condition register bits. These bits can be treated on PowerPC as separate registers; individual bit operations (and, or, xor, etc.) are supported. Tracking booleans in CR bits has several advantages:

- Reduction in register pressure (because we no longer need GPRs to store boolean values).
- Logical operations on booleans can be handled more efficiently; we used to have to move all results from comparisons into GPRs, perform promoted logical operations in GPRs, and then move the result back into condition-register bits to be used by conditional branches. This can be very inefficient, because these CR <-> GPR moves have high latency and low throughput (especially when other associated instructions are accounted for).
- On the POWER7 and similar cores, we can increase total throughput by using the CR bits. CR bit operations have a dedicated functional unit.

Most of this is more-or-less mechanical: adjustments were needed in the calling-convention code, support was added for spilling/restoring individual condition-register bits, and conditional branch instruction definitions taking specific CR bits were added (plus patterns and code for generating bit-level operations).

This is enabled by default when running at -O2 and higher. For -O0 and -O1, where the ability to debug is more important, this feature is disabled by default. Individual CR bits do not have assigned DWARF register numbers, and storing values in CR bits makes them invisible to the debugger.

It is critical, however, that we don't move i1 values that have been promoted to larger values (such as those passed as function arguments) into bit registers only to quickly turn around and move the values back into GPRs (such as happens when values are returned by functions). A pair of target-specific DAG combines are added to remove the trunc/extends in:

  trunc(binary-ops(binary-ops(zext(x), zext(y)), ...))

and:

  zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...))

In short, we only want to use CR bits where some of the i1 values come from comparisons or are used by conditional branches or selects. To put it another way, if we can do the entire i1 computation in GPRs, then we probably should (on the POWER7, the GPR-operation throughput is higher, and for all cores, the CR <-> GPR moves are expensive).

POWER7 test-suite performance results (from 10 runs in each configuration):

  SingleSource/Benchmarks/Misc/mandel-2: 35% speedup
  MultiSource/Benchmarks/Prolangs-C++/city/city: 21% speedup
  MultiSource/Benchmarks/MiBench/automotive-susan: 23% speedup
  SingleSource/Benchmarks/CoyoteBench/huffbench: 13% speedup
  SingleSource/Benchmarks/Misc-C++/Large/sphereflake: 13% speedup
  SingleSource/Benchmarks/Misc-C++/mandel-text: 10% speedup

  SingleSource/Benchmarks/Misc-C++-EH/spirit: 10% slowdown
  MultiSource/Applications/lemon/lemon: 8% slowdown

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@202451 91177308-0d34-0410-b5e6-96231b3b80d8
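As a rough illustration of the first pattern (a hypothetical example, not code from this commit), consider an i1 computation whose inputs arrive already promoted in GPRs and whose result also leaves in a GPR; nothing is gained by routing the intermediate logic through CR bits, which is exactly the shape the combine is meant to catch:

  ; Hypothetical example; names and the i32 promotion width are chosen for illustration only.
  ; %x, %y, and %z arrive zero-extended in GPRs and the result is returned in a GPR,
  ; matching trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)).
  define zeroext i1 @promoted_bool_ops(i1 zeroext %x, i1 zeroext %y, i1 zeroext %z) {
  entry:
    %xe = zext i1 %x to i32
    %ye = zext i1 %y to i32
    %ze = zext i1 %z to i32
    %t0 = and i32 %xe, %ye
    %t1 = or i32 %t0, %ze
    %r = trunc i32 %t1 to i1
    ret i1 %r
  }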
153 lines · 3.6 KiB · LLVM
; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 -mattr=-crbits -disable-ppc-cmp-opt=0 | FileCheck %s
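; The CHECK lines below verify that the comparison is folded into a record-form
; (dot-suffixed) instruction where this is possible: the 64-bit integer cases reuse
; subf./sld. to set CR0, while the 32-bit, floating-point, and ctpop cases must not
; use a record form.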
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"

define signext i32 @foo(i32 signext %a, i32 signext %b, i32* nocapture %c) #0 {
entry:
  %sub = sub nsw i32 %a, %b
  store i32 %sub, i32* %c, align 4
  %cmp = icmp sgt i32 %a, %b
  %cond = select i1 %cmp, i32 %a, i32 %b
  ret i32 %cond

; CHECK: @foo
; CHECK-NOT: subf.
}

define signext i32 @foo2(i32 signext %a, i32 signext %b, i32* nocapture %c) #0 {
entry:
  %shl = shl i32 %a, %b
  store i32 %shl, i32* %c, align 4
  %cmp = icmp sgt i32 %shl, 0
  %conv = zext i1 %cmp to i32
  ret i32 %conv

; CHECK: @foo2
; CHECK-NOT: slw.
}

define i64 @fool(i64 %a, i64 %b, i64* nocapture %c) #0 {
entry:
  %sub = sub nsw i64 %a, %b
  store i64 %sub, i64* %c, align 8
  %cmp = icmp sgt i64 %a, %b
  %cond = select i1 %cmp, i64 %a, i64 %b
  ret i64 %cond

; CHECK: @fool
; CHECK: subf. [[REG:[0-9]+]], 4, 3
; CHECK: isel 3, 3, 4, 1
; CHECK: std [[REG]], 0(5)
}

define i64 @foolb(i64 %a, i64 %b, i64* nocapture %c) #0 {
entry:
  %sub = sub nsw i64 %a, %b
  store i64 %sub, i64* %c, align 8
  %cmp = icmp sle i64 %a, %b
  %cond = select i1 %cmp, i64 %a, i64 %b
  ret i64 %cond

; CHECK: @foolb
; CHECK: subf. [[REG:[0-9]+]], 4, 3
; CHECK: isel 3, 4, 3, 1
; CHECK: std [[REG]], 0(5)
}

define i64 @foolc(i64 %a, i64 %b, i64* nocapture %c) #0 {
entry:
  %sub = sub nsw i64 %b, %a
  store i64 %sub, i64* %c, align 8
  %cmp = icmp sgt i64 %a, %b
  %cond = select i1 %cmp, i64 %a, i64 %b
  ret i64 %cond

; CHECK: @foolc
; CHECK: subf. [[REG:[0-9]+]], 3, 4
; CHECK: isel 3, 3, 4, 0
; CHECK: std [[REG]], 0(5)
}

define i64 @foold(i64 %a, i64 %b, i64* nocapture %c) #0 {
entry:
  %sub = sub nsw i64 %b, %a
  store i64 %sub, i64* %c, align 8
  %cmp = icmp eq i64 %a, %b
  %cond = select i1 %cmp, i64 %a, i64 %b
  ret i64 %cond

; CHECK: @foold
; CHECK: subf. [[REG:[0-9]+]], 3, 4
; CHECK: isel 3, 3, 4, 2
; CHECK: std [[REG]], 0(5)
}

define i64 @foold2(i64 %a, i64 %b, i64* nocapture %c) #0 {
entry:
  %sub = sub nsw i64 %a, %b
  store i64 %sub, i64* %c, align 8
  %cmp = icmp eq i64 %a, %b
  %cond = select i1 %cmp, i64 %a, i64 %b
  ret i64 %cond

; CHECK: @foold2
; CHECK: subf. [[REG:[0-9]+]], 4, 3
; CHECK: isel 3, 3, 4, 2
; CHECK: std [[REG]], 0(5)
}

define i64 @foo2l(i64 %a, i64 %b, i64* nocapture %c) #0 {
entry:
  %shl = shl i64 %a, %b
  store i64 %shl, i64* %c, align 8
  %cmp = icmp sgt i64 %shl, 0
  %conv1 = zext i1 %cmp to i64
  ret i64 %conv1

; CHECK: @foo2l
; CHECK: sld. 4, 3, 4
; CHECK: std 4, 0(5)
}

define double @food(double %a, double %b, double* nocapture %c) #0 {
entry:
  %sub = fsub double %a, %b
  store double %sub, double* %c, align 8
  %cmp = fcmp ogt double %a, %b
  %cond = select i1 %cmp, double %a, double %b
  ret double %cond

; CHECK: @food
; CHECK-NOT: fsub. 0, 1, 2
; CHECK: stfd 0, 0(5)
}

define float @foof(float %a, float %b, float* nocapture %c) #0 {
entry:
  %sub = fsub float %a, %b
  store float %sub, float* %c, align 4
  %cmp = fcmp ogt float %a, %b
  %cond = select i1 %cmp, float %a, float %b
  ret float %cond

; CHECK: @foof
; CHECK-NOT: fsubs. 0, 1, 2
; CHECK: stfs 0, 0(5)
}

declare i64 @llvm.ctpop.i64(i64);

define signext i64 @fooct(i64 signext %a, i64 signext %b, i64* nocapture %c) #0 {
entry:
  %sub = sub nsw i64 %a, %b
  %subc = call i64 @llvm.ctpop.i64(i64 %sub)
  store i64 %subc, i64* %c, align 4
  %cmp = icmp sgt i64 %subc, 0
  %cond = select i1 %cmp, i64 %a, i64 %b
  ret i64 %cond

; CHECK: @fooct
; CHECK-NOT: popcntd.
}