mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2024-12-21 00:32:23 +00:00
f08cddcf56
Note: This was originally reverted to track down a buildbot error. This commit exposed a latent bug that was fixed in r215753. Therefore it is reapplied without any modifications. I run it through SPEC2k and SPEC2k6 for AArch64 and it didn't introduce any new regressions. Original commit message: This changes the order in which FastISel tries to materialize a constant. Originally it would try to use a simple target-independent approach, which can lead to the generation of inefficient code. On X86 this would result in the use of movabsq to materialize any 64bit integer constant - even for simple and small values such as 0 and 1. Also some very funny floating-point materialization could be observed too. On AArch64 it would materialize the constant 0 in a register even though the architecture has an actual "zero" register. On ARM it would generate unnecessary mov instructions or not use mvn. This change simply changes the order and always asks the target first if it likes to materialize the constant. This doesn't fix all the issues mentioned above, but it enables the targets to implement such optimizations. Related to <rdar://problem/17420988>. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@216006 91177308-0d34-0410-b5e6-96231b3b80d8
47 lines
1.6 KiB
LLVM
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -O0 -verify-machineinstrs -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
; Check that FastISel lowers a variadic call correctly on ARM and Thumb2:
; the first four i32 arguments go in r0-r3 and the remaining two are
; stored to the outgoing-argument area on the stack before the call.
define i32 @VarArg() nounwind {
entry:
  %i = alloca i32, align 4
  %j = alloca i32, align 4
  %k = alloca i32, align 4
  %m = alloca i32, align 4
  %n = alloca i32, align 4
  %tmp = alloca i32, align 4
  %0 = load i32* %i, align 4
  %1 = load i32* %j, align 4
  %2 = load i32* %k, align 4
  %3 = load i32* %m, align 4
  %4 = load i32* %n, align 4
; ARM: VarArg
; ARM: mov [[FP:r[0-9]+]], sp
; ARM: sub sp, sp, #32
; ARM: movw r0, #5
; ARM: ldr r1, {{\[}}[[FP]], #-4]
; ARM: ldr r2, {{\[}}[[FP]], #-8]
; ARM: ldr r3, {{\[}}[[FP]], #-12]
; ARM: ldr [[Ra:r[0-9]+]], [sp, #16]
; ARM: ldr [[Rb:[lr]+[0-9]*]], [sp, #12]
; ARM: str [[Ra]], [sp]
; ARM: str [[Rb]], [sp, #4]
; ARM: bl {{_?CallVariadic}}
; THUMB: sub sp, #32
; THUMB: movs r0, #5
; THUMB: ldr r1, [sp, #28]
; THUMB: ldr r2, [sp, #24]
; THUMB: ldr r3, [sp, #20]
; THUMB: ldr.w {{[a-z0-9]+}}, [sp, #16]
; THUMB: ldr.w {{[a-z0-9]+}}, [sp, #12]
; THUMB: str.w {{[a-z0-9]+}}, [sp]
; THUMB: str.w {{[a-z0-9]+}}, [sp, #4]
; THUMB: bl {{_?}}CallVariadic
  %call = call i32 (i32, ...)* @CallVariadic(i32 5, i32 %0, i32 %1, i32 %2, i32 %3, i32 %4)
  store i32 %call, i32* %tmp, align 4
  %5 = load i32* %tmp, align 4
  ret i32 %5
}
; External variadic callee; only the lowering of calls to it is checked.
declare i32 @CallVariadic(i32, ...)