; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
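
; The first case extracts 4 bits (mask 0xf) starting at bit 5; since 5 + 4 = 9
; is less than 32, the BFE_UINT pattern applies here.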
; CHECK: @bfe_def
; CHECK: BFE_UINT
define void @bfe_def(i32 addrspace(1)* %out, i32 %x) {
entry:
  %0 = lshr i32 %x, 5
  %1 = and i32 %0, 15 ; 0xf
  store i32 %1, i32 addrspace(1)* %out
  ret void
}

; This program could be implemented using a BFE_UINT instruction; however,
; since the lshr constant plus the number of bits in the mask is >= 32, it can
; also be implemented with an LSHR instruction, which is better because LSHR
; has fewer operands and requires fewer constants.
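; Here the shift amount is 16 and the mask 0xffff covers 16 bits, so 16 + 16 = 32:
; the lshr already clears the upper 16 bits, the mask is a no-op, and a single
; LSHR suffices.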

; CHECK: @bfe_shift
; CHECK-NOT: BFE_UINT
define void @bfe_shift(i32 addrspace(1)* %out, i32 %x) {
entry:
  %0 = lshr i32 %x, 16
  %1 = and i32 %0, 65535 ; 0xffff
  store i32 %1, i32 addrspace(1)* %out
  ret void
}