; Provenance: mirror of https://github.com/c64scene-ar/llvm-6502.git
; (synced 2025-01-12, commit 8fc760cbe8; upstream LLVM r183551).
; Upstream commit message summary: ARM FastISel could not select integer
; sext/zext pre-ARMv6 and asserted out (PR16178). The fix handles all sext
; and zext from i1/i8/i16 to i8/i16/i32, with and without ARMv6, in both
; Thumb and ARM mode, and uses reg-imm AND for 8-bit zext instead of UXTB
; since AND is supported from ARMv4t onward with identical performance on
; A15. This file tests all sext/zext combinations FastISel should handle.
; RUN: llc < %s -O0 -fast-isel-abort -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=v7
; RUN: llc < %s -O0 -fast-isel-abort -mtriple=armv4t-apple-ios | FileCheck %s --check-prefix=prev6
; RUN: llc < %s -O0 -fast-isel-abort -mtriple=armv5-apple-ios | FileCheck %s --check-prefix=prev6
; RUN: llc < %s -O0 -fast-isel-abort -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=v7

; Can't test pre-ARMv6 Thumb because ARM FastISel currently only supports
; Thumb2. The ARMFastISel::ARMEmitIntExt code should work for Thumb by always
; using two shifts.

; Note that lsl, asr and lsr in Thumb are all encoded as 16-bit instructions
; and therefore must set flags. {{s?}} below denotes this, instead of
; duplicating tests.

; zext
; zext i1 -> i8: lowered to a reg-imm AND with 1 on all targets.
define i8 @zext_1_8(i1 %a) nounwind ssp {
; v7: zext_1_8:
; v7: and r0, r0, #1
; prev6: zext_1_8:
; prev6: and r0, r0, #1
  %r = zext i1 %a to i8
  ret i8 %r
}

; zext i1 -> i16: lowered to a reg-imm AND with 1 on all targets.
define i16 @zext_1_16(i1 %a) nounwind ssp {
; v7: zext_1_16:
; v7: and r0, r0, #1
; prev6: zext_1_16:
; prev6: and r0, r0, #1
  %r = zext i1 %a to i16
  ret i16 %r
}

; zext i1 -> i32: lowered to a reg-imm AND with 1 on all targets.
define i32 @zext_1_32(i1 %a) nounwind ssp {
; v7: zext_1_32:
; v7: and r0, r0, #1
; prev6: zext_1_32:
; prev6: and r0, r0, #1
  %r = zext i1 %a to i32
  ret i32 %r
}

; zext i8 -> i16: reg-imm AND with 255 (works from ARMv4t, unlike uxtb).
define i16 @zext_8_16(i8 %a) nounwind ssp {
; v7: zext_8_16:
; v7: and r0, r0, #255
; prev6: zext_8_16:
; prev6: and r0, r0, #255
  %r = zext i8 %a to i16
  ret i16 %r
}

; zext i8 -> i32: reg-imm AND with 255 (works from ARMv4t, unlike uxtb).
define i32 @zext_8_32(i8 %a) nounwind ssp {
; v7: zext_8_32:
; v7: and r0, r0, #255
; prev6: zext_8_32:
; prev6: and r0, r0, #255
  %r = zext i8 %a to i32
  ret i32 %r
}

; zext i16 -> i32: uxth on ARMv6+, a shift-left/shift-right pair pre-ARMv6
; (65535 is not encodable as an AND immediate there).
define i32 @zext_16_32(i16 %a) nounwind ssp {
; v7: zext_16_32:
; v7: uxth r0, r0
; prev6: zext_16_32:
; prev6: lsl{{s?}} r0, r0, #16
; prev6: lsr{{s?}} r0, r0, #16
  %r = zext i16 %a to i32
  ret i32 %r
}

; sext
; sext i1 -> i8: shift-left/arith-shift-right by 31 on all targets.
define i8 @sext_1_8(i1 %a) nounwind ssp {
; v7: sext_1_8:
; v7: lsl{{s?}} r0, r0, #31
; v7: asr{{s?}} r0, r0, #31
; prev6: sext_1_8:
; prev6: lsl{{s?}} r0, r0, #31
; prev6: asr{{s?}} r0, r0, #31
  %r = sext i1 %a to i8
  ret i8 %r
}

; sext i1 -> i16: shift-left/arith-shift-right by 31 on all targets.
define i16 @sext_1_16(i1 %a) nounwind ssp {
; v7: sext_1_16:
; v7: lsl{{s?}} r0, r0, #31
; v7: asr{{s?}} r0, r0, #31
; prev6: sext_1_16:
; prev6: lsl{{s?}} r0, r0, #31
; prev6: asr{{s?}} r0, r0, #31
  %r = sext i1 %a to i16
  ret i16 %r
}

; sext i1 -> i32: shift-left/arith-shift-right by 31 on all targets.
define i32 @sext_1_32(i1 %a) nounwind ssp {
; v7: sext_1_32:
; v7: lsl{{s?}} r0, r0, #31
; v7: asr{{s?}} r0, r0, #31
; prev6: sext_1_32:
; prev6: lsl{{s?}} r0, r0, #31
; prev6: asr{{s?}} r0, r0, #31
  %r = sext i1 %a to i32
  ret i32 %r
}

; sext i8 -> i16: sxtb on ARMv6+, shift pair (by 24) pre-ARMv6.
define i16 @sext_8_16(i8 %a) nounwind ssp {
; v7: sext_8_16:
; v7: sxtb r0, r0
; prev6: sext_8_16:
; prev6: lsl{{s?}} r0, r0, #24
; prev6: asr{{s?}} r0, r0, #24
  %r = sext i8 %a to i16
  ret i16 %r
}

; sext i8 -> i32: sxtb on ARMv6+, shift pair (by 24) pre-ARMv6.
define i32 @sext_8_32(i8 %a) nounwind ssp {
; v7: sext_8_32:
; v7: sxtb r0, r0
; prev6: sext_8_32:
; prev6: lsl{{s?}} r0, r0, #24
; prev6: asr{{s?}} r0, r0, #24
  %r = sext i8 %a to i32
  ret i32 %r
}

; sext i16 -> i32: sxth on ARMv6+, shift pair (by 16) pre-ARMv6.
define i32 @sext_16_32(i16 %a) nounwind ssp {
; v7: sext_16_32:
; v7: sxth r0, r0
; prev6: sext_16_32:
; prev6: lsl{{s?}} r0, r0, #16
; prev6: asr{{s?}} r0, r0, #16
  %r = sext i16 %a to i32
  ret i32 %r
}