mirror of https://github.com/c64scene-ar/llvm-6502.git
832254e1c2
This feature is needed in order to support shifts of more than 255 bits on large integer types. This changes the syntax for LLVM assembly to make shl, ashr and lshr instructions look like a binary operator: shl i32 %X, 1 instead of shl i32 %X, i8 1. Additionally, this should help a few passes perform additional optimizations.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@33776 91177308-0d34-0410-b5e6-96231b3b80d8
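To make the syntax change concrete, here is a minimal before/after sketch in LLVM assembly; the value names %X, %Y and %Z are illustrative only and do not appear in the commit itself:

; old form: the shift amount was a separate i8 operand, so shifts were capped at 255 bits
%a = shl i32 %X, i8 1

; new form: the shift amount is an ordinary operand of the same type as the shifted value
%a = shl i32 %X, 1
%b = lshr i256 %Y, 300     ; shifts of more than 255 bits are now expressible
%c = ashr i64 %Z, 3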
20 lines
451 B
LLVM
; RUN: llvm-as < %s | llc -march=ppc32 &&
; RUN: llvm-as < %s | llc -march=ppc32 | not grep rlwin
define void @test(i8* %P) {
%W = load i8* %P
%X = shl i8 %W, 1
%Y = add i8 %X, 2
%Z = and i8 %Y, 254 ; dead and
store i8 %Z, i8* %P
ret void
}
define i16 @test2(i16 zext %crc) zext {
; No and's should be needed for the i16s here.
%tmp.1 = lshr i16 %crc, 1
%tmp.7 = xor i16 %tmp.1, 40961
ret i16 %tmp.7
}