mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2025-02-09 13:33:17 +00:00
My recent ARM FastISel patch exposed this bug: http://llvm.org/bugs/show_bug.cgi?id=16178

The root cause is that FastISel can't select integer sext/zext pre-ARMv6 and asserts out. The current integer sext/zext code doesn't handle other cases gracefully either, so this patch makes it handle all sext and zext from i1/i8/i16 to i8/i16/i32, with and without ARMv6, in both Thumb and ARM mode. This should fix the bug and also make FastISel faster, because it bails out to SelectionDAG less often. See fastisel-ext.patch for this.

fastisel-ext-tests.patch changes the current tests to always use a reg-imm AND for 8-bit zext instead of UXTB. This simplifies the code since AND is supported on ARMv4t and later, and at least on A15 both should perform exactly the same (both have exec 1, uop 1, type I).

2013-05-31-char-shift-crash.ll is a bitcode version of the bug 16178 repro above. fast-isel-ext.ll tests all sext/zext combinations that ARM FastISel should now handle.

Note that my ARM FastISel enabling patch was reverted due to a separate failure when dealing with MCJIT; I'll fix that second failure and then turn FastISel back on for non-iOS ARM targets.

I've tested "make check-all" on my x86 box, and "lnt test-suite" on A15 hardware.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@183551 91177308-0d34-0410-b5e6-96231b3b80d8
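As an illustration of the AND-instead-of-UXTB choice, a standalone 8-bit zext checked the same way would look roughly like the sketch below. This is not part of the commit's test file; the function name and exact register numbers are assumptions, and it reuses the ARM/THUMB check prefixes from the file that follows.

define i32 @zext_i8_to_i32(i8 %a) nounwind {
entry:
; ARM: zext_i8_to_i32
; ARM: and r0, r0, #255
; THUMB: zext_i8_to_i32
; THUMB: and r0, r0, #255
  %conv = zext i8 %a to i32
  ret i32 %conv
}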
78 lines
1.8 KiB
LLVM
; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB

define i32 @icmp_i16_signed(i16 %a, i16 %b) nounwind {
entry:
; ARM: icmp_i16_signed
; ARM: sxth r0, r0
; ARM: sxth r1, r1
; ARM: cmp r0, r1
; THUMB: icmp_i16_signed
; THUMB: sxth r0, r0
; THUMB: sxth r1, r1
; THUMB: cmp r0, r1
  %cmp = icmp slt i16 %a, %b
  %conv2 = zext i1 %cmp to i32
  ret i32 %conv2
}

define i32 @icmp_i16_unsigned(i16 %a, i16 %b) nounwind {
entry:
; ARM: icmp_i16_unsigned
; ARM: uxth r0, r0
; ARM: uxth r1, r1
; ARM: cmp r0, r1
; THUMB: icmp_i16_unsigned
; THUMB: uxth r0, r0
; THUMB: uxth r1, r1
; THUMB: cmp r0, r1
  %cmp = icmp ult i16 %a, %b
  %conv2 = zext i1 %cmp to i32
  ret i32 %conv2
}

define i32 @icmp_i8_signed(i8 %a, i8 %b) nounwind {
entry:
; ARM: icmp_i8_signed
; ARM: sxtb r0, r0
; ARM: sxtb r1, r1
; ARM: cmp r0, r1
; THUMB: icmp_i8_signed
; THUMB: sxtb r0, r0
; THUMB: sxtb r1, r1
; THUMB: cmp r0, r1
  %cmp = icmp sgt i8 %a, %b
  %conv2 = zext i1 %cmp to i32
  ret i32 %conv2
}

define i32 @icmp_i8_unsigned(i8 %a, i8 %b) nounwind {
entry:
; ARM: icmp_i8_unsigned
; ARM: and r0, r0, #255
; ARM: and r1, r1, #255
; ARM: cmp r0, r1
; THUMB: icmp_i8_unsigned
; THUMB: and r0, r0, #255
; THUMB: and r1, r1, #255
; THUMB: cmp r0, r1
  %cmp = icmp ugt i8 %a, %b
  %conv2 = zext i1 %cmp to i32
  ret i32 %conv2
}

define i32 @icmp_i1_unsigned(i1 %a, i1 %b) nounwind {
entry:
; ARM: icmp_i1_unsigned
; ARM: and r0, r0, #1
; ARM: and r1, r1, #1
; ARM: cmp r0, r1
; THUMB: icmp_i1_unsigned
; THUMB: and r0, r0, #1
; THUMB: and r1, r1, #1
; THUMB: cmp r0, r1
  %cmp = icmp ult i1 %a, %b
  %conv2 = zext i1 %cmp to i32
  ret i32 %conv2
}