mirror of https://github.com/c64scene-ar/llvm-6502.git
synced 2025-01-22 10:33:23 +00:00
3d476a80e9
Unaligned vldm/vstm need more uops and therefore are slower in general on swift. radar://14522102 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@189961 91177308-0d34-0410-b5e6-96231b3b80d8
29 lines
940 B
LLVM
; RUN: llc < %s -mcpu=swift -mtriple=armv7s-apple-ios | FileCheck %s
; vldm with registers not aligned with q registers needs more micro-ops,
; so their usage becomes unbeneficial on swift.
; CHECK-LABEL: test_vldm
; CHECK: vldmia r1, {d18, d19, d20}
; CHECK-NOT: vldmia r1, {d17, d18, d19, d20}
define double @test_vldm(double %a, double %b, double* nocapture %x) {
entry:
  %mul73 = fmul double %a, %b
  %addr1 = getelementptr double * %x, i32 1
  %addr2 = getelementptr double * %x, i32 2
  %addr3 = getelementptr double * %x, i32 3
  %load0 = load double * %x
  %load1 = load double * %addr1
  %load2 = load double * %addr2
  %load3 = load double * %addr3
  %sub = fsub double %mul73, %load1
  %mul = fmul double %mul73, %load0
  %add = fadd double %mul73, %load2
  %div = fdiv double %mul73, %load3
  %red = fadd double %sub, %mul
  %red2 = fadd double %div, %add
  %red3 = fsub double %red, %red2
  ret double %red3
}
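
The RUN line above can also be reproduced by hand. A minimal sketch, assuming the file is saved locally as swift-vldm.ll (the filename is an assumption here) and that llc and FileCheck from a matching LLVM build are on PATH:

llc < swift-vldm.ll -mcpu=swift -mtriple=armv7s-apple-ios | FileCheck swift-vldm.ll

FileCheck reads the CHECK lines embedded in the same file and verifies that the generated assembly contains the three-register vldmia {d18, d19, d20} rather than the wider form starting at d17, which is not aligned with a q-register boundary.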