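; Check that function and loop alignment depend on the target CPU. GENERIC
; covers the plain ppc64 subtarget, BASIC the embedded cores (a2, e500mc,
; e5500), and PWR the 970 and POWER4 through POWER8 server cores.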
; RUN: llc -mcpu=ppc64 < %s | FileCheck %s -check-prefix=GENERIC
; RUN: llc -mcpu=970 < %s | FileCheck %s -check-prefix=PWR
; RUN: llc -mcpu=a2 < %s | FileCheck %s -check-prefix=BASIC
; RUN: llc -mcpu=e500mc < %s | FileCheck %s -check-prefix=BASIC
; RUN: llc -mcpu=e5500 < %s | FileCheck %s -check-prefix=BASIC
; RUN: llc -mcpu=pwr4 < %s | FileCheck %s -check-prefix=PWR
; RUN: llc -mcpu=pwr5 < %s | FileCheck %s -check-prefix=PWR
; RUN: llc -mcpu=pwr5x < %s | FileCheck %s -check-prefix=PWR
; RUN: llc -mcpu=pwr6 < %s | FileCheck %s -check-prefix=PWR
; RUN: llc -mcpu=pwr6x < %s | FileCheck %s -check-prefix=PWR
; RUN: llc -mcpu=pwr7 < %s | FileCheck %s -check-prefix=PWR
; RUN: llc -mcpu=pwr8 < %s | FileCheck %s -check-prefix=PWR
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
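
; On PowerPC/ELF, .align takes a power of two, so .align 2 requests a 4-byte
; and .align 4 a 16-byte boundary. The generic subtarget keeps the default
; 4-byte function alignment; the BASIC and PWR cores align functions to 16
; bytes.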
; Function Attrs: nounwind readnone
define signext i32 @foo(i32 signext %x) #0 {
entry:
  %mul = shl nsw i32 %x, 1
  ret i32 %mul

; GENERIC-LABEL: .globl foo
; BASIC-LABEL: .globl foo
; PWR-LABEL: .globl foo
; GENERIC: .align 2
; BASIC: .align 4
; PWR: .align 4
; GENERIC: @foo
; BASIC: @foo
; PWR: @foo
}
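
; The vectorized loop below is converted into a counter-based loop (mtctr
; loads the count register, bdnz decrements it and branches while nonzero),
; and its loads become the pre-increment lwzu form. The BASIC and PWR cores
; pad the loop body to a 16-byte boundary; the generic subtarget emits no
; extra .align.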
; Function Attrs: nounwind
define void @loop(i32 signext %x, i32* nocapture %a) #1 {
entry:
  br label %vector.body

; GENERIC-LABEL: @loop
; BASIC-LABEL: @loop
; PWR-LABEL: @loop
; GENERIC: mtctr
; BASIC: mtctr
; PWR: mtctr
; GENERIC-NOT: .align
; BASIC: .align 4
; PWR: .align 4
; GENERIC: lwzu
; BASIC: lwzu
; PWR: lwzu
; GENERIC: bdnz
; BASIC: bdnz
; PWR: bdnz

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %induction45 = or i64 %index, 1
  %0 = getelementptr inbounds i32* %a, i64 %index
  %1 = getelementptr inbounds i32* %a, i64 %induction45
  %2 = load i32* %0, align 4
  %3 = load i32* %1, align 4
  %4 = add nsw i32 %2, 4
  %5 = add nsw i32 %3, 4
  %6 = mul nsw i32 %4, 3
  %7 = mul nsw i32 %5, 3
  store i32 %6, i32* %0, align 4
  store i32 %7, i32* %1, align 4
  %index.next = add i64 %index, 2
  %8 = icmp eq i64 %index.next, 2048
  br i1 %8, label %for.end, label %vector.body

for.end:                                          ; preds = %vector.body
  ret void
}
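
; The small scalar loop gets 16-byte alignment on the embedded cores, but the
; PWR cores use 32 bytes (.align 5), presumably so a short body lands within
; a single 32-byte fetch group.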
; Function Attrs: nounwind
define void @sloop(i32 signext %x, i32* nocapture %a) #1 {
entry:
  br label %for.body

; GENERIC-LABEL: @sloop
; BASIC-LABEL: @sloop
; PWR-LABEL: @sloop
; GENERIC: mtctr
; BASIC: mtctr
; PWR: mtctr
; GENERIC-NOT: .align
; BASIC: .align 4
; PWR: .align 5
; GENERIC: bdnz
; BASIC: bdnz
; PWR: bdnz

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
  %0 = load i32* %arrayidx, align 4
  %add = add nsw i32 %0, 4
  %mul = mul nsw i32 %add, 3
  store i32 %mul, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 2048
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }