Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2025-07-29 10:25:12 +00:00
Add several testcases for new optimizations in the code generator.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@19244 91177308-0d34-0410-b5e6-96231b3b80d8
test/CodeGen/X86/commute-two-addr.ll (new file, 21 lines)
@@ -0,0 +1,21 @@
; The register allocator can commute two-address instructions to avoid
; insertion of register-register copies.

; Check that there are no register-register copies left.
; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | not grep 'mov %E.X, %E.X'

%G = external global int

declare void %ext(int)

int %add_test(int %X, int %Y) {
    %Z = add int %X, %Y         ;; Last use of Y, but not of X.
    store int %Z, int* %G
    ret int %X
}

int %xor_test(int %X, int %Y) {
    %Z = xor int %X, %Y         ;; Last use of Y, but not of X.
    store int %Z, int* %G
    ret int %X
}
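The copy that the RUN line rules out comes from the x86 two-address constraint: ADD and XOR overwrite their first operand, and %X is still live after the operation (it is returned). A rough sketch of the two possible allocations, with purely illustrative register assignments rather than output quoted from llc:

    ; Assumed for illustration: X arrives in EAX, Y in EDX, the sum is wanted in ECX.
    ; Without commuting, X must be copied first because ADD destroys its left operand:
    mov ECX, EAX            ; save X
    add ECX, EDX            ; ECX = X + Y
    ; With the operands commuted, the dead value Y can be overwritten instead:
    add EDX, EAX            ; EDX = Y + X, no register-register copy emitted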
test/CodeGen/X86/overlap-add.ll (new file, 24 lines)
@@ -0,0 +1,24 @@
;; X's live range extends beyond the shift, so the register allocator
;; cannot coalesce it with Y. Because of this, a copy needs to be
;; emitted before the shift to save the register value before it is
;; clobbered. However, this copy is not needed if the register
;; allocator turns the shift into an LEA. This also occurs for ADD.

; Check that the shift gets turned into an LEA.

; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | not grep 'mov %E.X, %E.X'

%G = external global int

int %test1(int %X, int %Y) {
    %Z = add int %X, %Y
    volatile store int %Y, int* %G
    volatile store int %Z, int* %G
    ret int %X
}

int %test2(int %X) {
    %Z = add int %X, 1          ;; inc
    volatile store int %Z, int* %G
    ret int %X
}
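Because %X is returned after the add, a two-address ADD would force a copy of %X before it is clobbered. LEA is a three-address form, so it can compute the sum into a fresh register without disturbing either input. Roughly, with register assignments assumed for illustration only:

    ; Assumed for illustration: X in EAX, Y in EDX, sum wanted in ECX.
    ; Two-address ADD needs a copy because X is still live afterwards:
    mov ECX, EAX
    add ECX, EDX
    ; LEA computes the same sum without clobbering EAX or EDX:
    lea ECX, [EAX + EDX]
    ; For test2, 'add int %X, 1' can likewise become: lea ECX, [EAX + 1]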
test/CodeGen/X86/overlap-shift.ll (new file, 17 lines)
@@ -0,0 +1,17 @@
;; X's live range extends beyond the shift, so the register allocator
;; cannot coalesce it with Y. Because of this, a copy needs to be
;; emitted before the shift to save the register value before it is
;; clobbered. However, this copy is not needed if the register
;; allocator turns the shift into an LEA. This also occurs for ADD.

; Check that the shift gets turned into an LEA.

; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | not grep 'mov %E.X, %E.X'

%G = external global int

int %test1(int %X) {
    %Z = shl int %X, ubyte 2
    volatile store int %Z, int* %G
    ret int %X
}
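The same reasoning applies to the left shift by 2: LEA's scaled-index addressing can multiply by 4 without touching the source register. An illustrative sketch, with register choices assumed rather than taken from llc output:

    ; Assumed for illustration: X in EAX, result wanted in ECX.
    ; Two-address SHL clobbers its operand, so X must be copied first:
    mov ECX, EAX
    shl ECX, 2
    ; LEA leaves EAX (the returned value) intact:
    lea ECX, [EAX*4]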