llvm-6502/test/CodeGen/PowerPC/addi-licm.ll


; The PPCLoopPreIncPrep pass, a target-specific late IR-level pass (running
; post-LSR), rewrites loops so that the addresses accessed are pulled into
; dedicated PHIs and "pre-decremented" in the loop preheader, enabling
; pre-increment load/store instructions without loop peeling.  Generically,
; it transforms
;   for (int i = 0; i < n; ++i) array[i] = c;
; into
;   T *p = &array[-1];
;   for (int i = 0; i < n; ++i) *++p = c;
; The first RUN line disables the pass (CHECK prefix); the second runs with
; it enabled (PIP prefix).
; RUN: llc -mcpu=pwr7 -disable-ppc-preinc-prep < %s | FileCheck %s
; RUN: llc -mcpu=pwr7 < %s | FileCheck %s -check-prefix=PIP
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind
define double @foo() #1 {
entry:
%x = alloca [2048 x float], align 4
%y = alloca [2048 x float], align 4
%0 = bitcast [2048 x float]* %x to i8*
call void @llvm.lifetime.start(i64 8192, i8* %0) #2
%1 = bitcast [2048 x float]* %y to i8*
call void @llvm.lifetime.start(i64 8192, i8* %1) #2
br label %for.body.i
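; With the preparation pass disabled, the addi instructions that compute the
; stack addresses of %x and %y should be hoisted out of the loop by LICM
; (hence the test name), and the loop body should use indexed (lfsx) loads.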
; CHECK-LABEL: @foo
; CHECK: addi [[REG1:[0-9]+]], 1,
; CHECK: addi [[REG2:[0-9]+]], 1,
; CHECK: %for.body.i
; CHECK-DAG: lfsx {{[0-9]+}}, [[REG1]],
; CHECK-DAG: lfsx {{[0-9]+}}, [[REG2]],
; CHECK: blr
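; With the pass enabled, the loads should instead be selected as update-form
; lfsu instructions, which load from base+4 and write the incremented address
; back to the base register, so no separate index register is needed inside
; the loop.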
; PIP-LABEL: @foo
; PIP: addi [[REG1:[0-9]+]], 1,
; PIP: addi [[REG2:[0-9]+]], 1,
; PIP: %for.body.i
; PIP-DAG: lfsu {{[0-9]+}}, 4([[REG1]])
; PIP-DAG: lfsu {{[0-9]+}}, 4([[REG2]])
; PIP: blr
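; The loop below walks the two 2048-element float arrays in lockstep,
; multiplying corresponding elements and accumulating the products into a
; double.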
for.body.i:                                       ; preds = %entry, %for.body.i
%accumulator.09.i = phi double [ %add.i, %for.body.i ], [ 0.000000e+00, %entry ]
%i.08.i = phi i64 [ %inc.i, %for.body.i ], [ 0, %entry ]
%arrayidx.i = getelementptr inbounds [2048 x float]* %x, i64 0, i64 %i.08.i
%v14 = load float* %arrayidx.i, align 4
%conv.i = fpext float %v14 to double
%arrayidx1.i = getelementptr inbounds [2048 x float]* %y, i64 0, i64 %i.08.i
%v15 = load float* %arrayidx1.i, align 4
%conv2.i = fpext float %v15 to double
%mul.i = fmul double %conv.i, %conv2.i
%add.i = fadd double %accumulator.09.i, %mul.i
%inc.i = add nuw nsw i64 %i.08.i, 1
%exitcond.i = icmp eq i64 %i.08.i, 2047
br i1 %exitcond.i, label %loop.exit, label %for.body.i
loop.exit: ; preds = %for.body.i
ret double %accumulator.09.i
}
; Function Attrs: nounwind
declare void @llvm.lifetime.start(i64, i8* nocapture) #2
declare void @bar(float*, float*)
; Function Attrs: nounwind
declare void @llvm.lifetime.end(i64, i8* nocapture) #2
attributes #0 = { nounwind readonly }
attributes #1 = { nounwind }
attributes #2 = { nounwind }