llvm-6502/test/CodeGen/SPARC/basictest.ll
James Y Knight 8eb1aaac9c [SPARC] Clean up handling of the Y/ASR registers.
- Implement copying ASR to/from GPR regs.
- Mark ASRs as non-allocatable, so the register allocator won't try to
  use them inappropriately.
- Instead of inserting explicit WRASR/RDASR nodes in the MUL/DIV
  routines, just do normal register copies (see the sketch after this
  list).
- Also, mark div as using Y, not just writing it.
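
A minimal sketch of what the first and third bullets amount to: teach the
target's copyPhysReg() hook to expand an ASR<->GPR copy into a WRASR or
RDASR instruction (WR xors its two source operands, so copying a GPR into
an ASR such as %y is "wr %g0, <src>, <asr>", and the reverse is a plain
"rd"). This is illustrative only, not the verbatim patch; the opcode and
register-class names (SP::WRASRrr, SP::RDASR, SP::ASRRegsRegClass) are
assumed here to match the in-tree SPARC .td definitions.

  // Sketch: ASR<->GPR cases handled inside SparcInstrInfo::copyPhysReg().
  #include "SparcInstrInfo.h"
  #include "llvm/CodeGen/MachineInstrBuilder.h"
  #include "llvm/Support/ErrorHandling.h"
  using namespace llvm;

  void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
    if (SP::ASRRegsRegClass.contains(DestReg) &&
        SP::IntRegsRegClass.contains(SrcReg)) {
      // GPR -> ASR:  wr %g0, SrcReg, DestReg   (%g0 xor SrcReg == SrcReg)
      BuildMI(MBB, I, DL, get(SP::WRASRrr), DestReg)
          .addReg(SP::G0)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else if (SP::IntRegsRegClass.contains(DestReg) &&
               SP::ASRRegsRegClass.contains(SrcReg)) {
      // ASR -> GPR:  rd SrcReg, DestReg
      BuildMI(MBB, I, DL, get(SP::RDASR), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      // GPR<->GPR and FP copy cases elided for brevity.
      llvm_unreachable("Impossible reg-to-reg copy");
    }
  }

With the copies expressed this way, the MUL/DIV lowering can emit ordinary
CopyToReg/CopyFromReg nodes for %y and leave the expansion to generic COPY
lowering via copyPhysReg.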

Added a test case with code that previously died with an assertion
failure (with -O0) or produced wrong code (otherwise).

(Third time's the charm?)

Differential Revision: http://reviews.llvm.org/D10401

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@241686 91177308-0d34-0410-b5e6-96231b3b80d8
2015-07-08 16:25:12 +00:00

; RUN: llc < %s -march=sparc | FileCheck %s

define i32 @test0(i32 %X) {
  %tmp.1 = add i32 %X, 1
  ret i32 %tmp.1
; CHECK-LABEL: test0:
; CHECK: add %o0, 1, %o0
}

;; xnor tests.
define i32 @test1(i32 %X, i32 %Y) {
  %A = xor i32 %X, %Y
  %B = xor i32 %A, -1
  ret i32 %B
; CHECK-LABEL: test1:
; CHECK: xnor %o0, %o1, %o0
}

define i32 @test2(i32 %X, i32 %Y) {
  %A = xor i32 %X, -1
  %B = xor i32 %A, %Y
  ret i32 %B
; CHECK-LABEL: test2:
; CHECK: xnor %o0, %o1, %o0
}

; CHECK-LABEL: store_zero:
; CHECK: st %g0, [%o0]
; CHECK: st %g0, [%o1+4]
define i32 @store_zero(i32* %a, i32* %b) {
entry:
  store i32 0, i32* %a, align 4
  %0 = getelementptr inbounds i32, i32* %b, i32 1
  store i32 0, i32* %0, align 4
  ret i32 0
}

; CHECK-LABEL: signed_divide:
; CHECK: sra %o0, 31, %o2
; CHECK: wr %g0, %o2, %y
; CHECK: sdiv %o0, %o1, %o0
define i32 @signed_divide(i32 %a, i32 %b) {
  %r = sdiv i32 %a, %b
  ret i32 %r
}

; CHECK-LABEL: unsigned_divide:
; CHECK: wr %g0, %g0, %y
; CHECK: udiv %o0, %o1, %o0
define i32 @unsigned_divide(i32 %a, i32 %b) {
  %r = udiv i32 %a, %b
  ret i32 %r
}

; CHECK-LABEL: multiply_32x32:
; CHECK: smul %o0, %o1, %o0
define i32 @multiply_32x32(i32 %a, i32 %b) {
  %r = mul i32 %a, %b
  ret i32 %r
}

; CHECK-LABEL: signed_multiply_32x32_64:
; CHECK: smul %o0, %o1, %o1
; CHECK: rd %y, %o0
define i64 @signed_multiply_32x32_64(i32 %a, i32 %b) {
  %xa = sext i32 %a to i64
  %xb = sext i32 %b to i64
  %r = mul i64 %xa, %xb
  ret i64 %r
}

; CHECK-LABEL: unsigned_multiply_32x32_64:
; CHECK: umul %o0, %o1, %o2
; CHECK: rd %y, %o2
; FIXME: the smul in the output is totally redundant and should not be there.
; CHECK: smul %o0, %o1, %o1
; CHECK: retl
; CHECK: mov %o2, %o0
define i64 @unsigned_multiply_32x32_64(i32 %a, i32 %b) {
  %xa = zext i32 %a to i64
  %xb = zext i32 %b to i64
  %r = mul i64 %xa, %xb
  ret i64 %r
}