SLPVectorizer: improved scheduling algorithm.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@214494 91177308-0d34-0410-b5e6-96231b3b80d8
commit 956268f9dc
parent 0a15e20ba2
File diff suppressed because it is too large
@@ -1,4 +1,4 @@
-; RUN: opt -slp-vectorizer -mtriple=x86_64-apple-macosx10.9.0 -mcpu=corei7-avx -S < %s | FileCheck %s
+; RUN: opt -basicaa -slp-vectorizer -mtriple=x86_64-apple-macosx10.9.0 -mcpu=corei7-avx -S < %s | FileCheck %s
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.9.0"
 
@@ -5,9 +5,11 @@ target triple = "x86_64-apple-macosx10.7.0"
 
 @.str = private unnamed_addr constant [6 x i8] c"bingo\00", align 1
 
-; We can't vectorize when the roots are used inside the tree.
+; Uses inside the tree must be scheduled after the corresponding tree bundle.
 ;CHECK-LABEL: @in_tree_user(
-;CHECK-NOT: load <2 x double>
+;CHECK: load <2 x double>
+;CHECK: fadd <2 x double>
+;CHECK: InTreeUser = fadd
 ;CHECK: ret
 define void @in_tree_user(double* nocapture %A, i32 %n) {
 entry:
@@ -22,7 +24,7 @@ for.body: ; preds = %for.inc, %entry
   %mul1 = fmul double %conv, %1
   %mul2 = fmul double %mul1, 7.000000e+00
   %add = fadd double %mul2, 5.000000e+00
-  %BadValue = fadd double %add, %add ; <------------------ In tree user.
+  %InTreeUser = fadd double %add, %add ; <------------------ In tree user.
   %2 = or i64 %0, 1
   %arrayidx6 = getelementptr inbounds double* %A, i64 %2
   %3 = load double* %arrayidx6, align 8
@@ -43,6 +45,7 @@ for.inc: ; preds = %for.body, %if.then
   br i1 %exitcond, label %for.end, label %for.body
 
 for.end: ; preds = %for.inc
+  store double %InTreeUser, double* %A, align 8 ; Avoid dead code elimination of the InTreeUser.
   ret void
 }
 
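The in-tree-user change above is easier to read against a C-level sketch. The following is a hand reconstruction from the IR hunks, not the original source: the loop bound, the constants of the second lane, and the printf call are assumptions, since those lines fall outside the hunks shown. The two fmul/fadd chains over A[2*i] and A[2*i+1] form a <2 x double> bundle, while %InTreeUser is an extra scalar use of one lane that the scheduler must place after that bundle; the final store was added so the value is not dead-code-eliminated.

#include <stdio.h>

/* Hypothetical C shape of @in_tree_user (reconstruction, not the original source). */
void in_tree_user(double *A, int n) {
  double InTreeUser = 0.0;
  for (long i = 0; i < 100; i++) {                      /* loop bound assumed */
    double t0 = (double)n * A[2 * i]     * 7.0 + 5.0;   /* lane 0 of the vectorizable tree */
    double t1 = (double)n * A[2 * i + 1] * 4.0 + 9.0;   /* lane 1; constants assumed */
    InTreeUser = t0 + t0;     /* in-tree user: scalar use of lane 0, scheduled after the bundle */
    if (t0 > t1)
      printf("bingo\n");      /* matches @.str; the call itself is assumed */
  }
  A[0] = InTreeUser;          /* store added by this commit to keep the value live */
}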
test/Transforms/SLPVectorizer/X86/scheduling.ll (new file, 78 lines)
@@ -0,0 +1,78 @@
; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9.0"

;CHECK-LABEL: @foo
;CHECK: load <4 x i32>
;CHECK: load <4 x i32>
;CHECK: %[[S1:.+]] = add <4 x i32>
;CHECK-DAG: store <4 x i32> %[[S1]]
;CHECK-DAG: %[[A1:.+]] = add nsw i32
;CHECK-DAG: %[[A2:.+]] = add nsw i32 %[[A1]]
;CHECK-DAG: %[[A3:.+]] = add nsw i32 %[[A2]]
;CHECK-DAG: %[[A4:.+]] = add nsw i32 %[[A3]]
;CHECK: ret i32 %[[A4]]

define i32 @foo(i32* nocapture readonly %diff) #0 {
entry:
  %m2 = alloca [8 x [8 x i32]], align 16
  %0 = bitcast [8 x [8 x i32]]* %m2 to i8*
  br label %for.body

for.body: ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %a.088 = phi i32 [ 0, %entry ], [ %add52, %for.body ]
  %1 = shl i64 %indvars.iv, 3
  %arrayidx = getelementptr inbounds i32* %diff, i64 %1
  %2 = load i32* %arrayidx, align 4
  %3 = or i64 %1, 4
  %arrayidx2 = getelementptr inbounds i32* %diff, i64 %3
  %4 = load i32* %arrayidx2, align 4
  %add3 = add nsw i32 %4, %2
  %arrayidx6 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 0
  store i32 %add3, i32* %arrayidx6, align 16
  %add10 = add nsw i32 %add3, %a.088
  %5 = or i64 %1, 1
  %arrayidx13 = getelementptr inbounds i32* %diff, i64 %5
  %6 = load i32* %arrayidx13, align 4
  %7 = or i64 %1, 5
  %arrayidx16 = getelementptr inbounds i32* %diff, i64 %7
  %8 = load i32* %arrayidx16, align 4
  %add17 = add nsw i32 %8, %6
  %arrayidx20 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 1
  store i32 %add17, i32* %arrayidx20, align 4
  %add24 = add nsw i32 %add10, %add17
  %9 = or i64 %1, 2
  %arrayidx27 = getelementptr inbounds i32* %diff, i64 %9
  %10 = load i32* %arrayidx27, align 4
  %11 = or i64 %1, 6
  %arrayidx30 = getelementptr inbounds i32* %diff, i64 %11
  %12 = load i32* %arrayidx30, align 4
  %add31 = add nsw i32 %12, %10
  %arrayidx34 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 2
  store i32 %add31, i32* %arrayidx34, align 8
  %add38 = add nsw i32 %add24, %add31
  %13 = or i64 %1, 3
  %arrayidx41 = getelementptr inbounds i32* %diff, i64 %13
  %14 = load i32* %arrayidx41, align 4
  %15 = or i64 %1, 7
  %arrayidx44 = getelementptr inbounds i32* %diff, i64 %15
  %16 = load i32* %arrayidx44, align 4
  %add45 = add nsw i32 %16, %14
  %arrayidx48 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 3
  store i32 %add45, i32* %arrayidx48, align 4
  %add52 = add nsw i32 %add38, %add45
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 8
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body
  %arraydecay = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 0
  call void @ff([8 x i32]* %arraydecay) #1
  ret i32 %add52
}

declare void @ff([8 x i32]*) #2
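The new scheduling.ll test reduces to the following C-level pattern, again a hand reconstruction from the IR rather than the original source: the inner four-element loop is written fully unrolled in the IR, and the parameter type of ff is an assumption based on its declaration. Per row, the four adjacent adds and stores into m2 can be bundled into <4 x i32> loads, an add, and a store, but each add also feeds the scalar accumulator, so those scalar uses must be scheduled after the vector bundle; the CHECK-DAG lines for %[[A1]]..%[[A4]] pin down exactly that scalar add chain.

/* Hypothetical C shape of @foo in scheduling.ll (reconstruction). */
void ff(int m[8][8]);                /* parameter type assumed; the IR only declares @ff */

int foo(const int *diff) {
  int m2[8][8];
  int a = 0;
  for (long i = 0; i < 8; i++) {
    for (int j = 0; j < 4; j++) {    /* written fully unrolled in the IR */
      int s = diff[i * 8 + j] + diff[i * 8 + j + 4];
      m2[i][j] = s;                  /* four consecutive stores -> one <4 x i32> store */
      a += s;                        /* scalar use of each lane, scheduled after the bundle */
    }
  }
  ff(m2);
  return a;
}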