- Add patterns for h-register extract, which avoids a shift and mask,
  and in some cases a temporary register.
- Add address-mode matching for turning (X>>(8-n))&(255<<n), where n is
  a valid address-mode scale value, into an h-register extract and a
  scaled-offset address.
- Replace X86's MOV32to32_ and related instructions with the new
  target-independent COPY_TO_SUBREG instruction.

On x86-64 there are complicated constraints on h registers, and CodeGen
doesn't currently provide a high-level way to express all of them, so they
are handled with a bunch of special code. This code currently only supports
extracts where the result is used by a zero-extend or a store, though these
are fairly common.

These transformations are not always beneficial; since there are only 4
h registers, they sometimes require extra move instructions, and this
sometimes increases register pressure because it can force out values that
would otherwise be in one of those registers. However, this appears to be
relatively uncommon.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@68962 91177308-0d34-0410-b5e6-96231b3b80d8
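
As a rough, hand-written illustration of the first two points (the register
choices and exact instructions below are assumptions, not verbatim llc
output; it assumes %p arrives in %eax and %x in %edx under the inreg
convention, and an x87 load for the double): foo8 in the test below computes
p[(x >> 8) & 255]. Without the new patterns, forming that index takes a
shift and a mask:

    movl   %edx, %ecx          # copy x
    shrl   $8, %ecx            # x >> 8
    andl   $255, %ecx          # ... & 255
    fldl   (%eax,%ecx,8)       # load p[index]

With the h-register patterns, the extract becomes a single zero-extending
move from the high byte, and the *8 scaling stays in the address:

    movzbl %dh, %ecx           # (x >> 8) & 255 in one instruction
    fldl   (%eax,%ecx,8)       # scaled-offset load of p[index]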
; RUN: llvm-as < %s | llc -march=x86 | grep {movzbl %\[abcd\]h,} | count 7
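
; Each of the seven functions below should compile to a single movzbl from
; an h register ((x >> 8) & 255 is the second-lowest byte of x), which is
; what the count of 7 in the RUN line checks.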
; Use h-register extract and zero-extend.

define double @foo8(double* nocapture inreg %p, i32 inreg %x) nounwind readonly {
  %t0 = lshr i32 %x, 8
  %t1 = and i32 %t0, 255
  %t2 = getelementptr double* %p, i32 %t1
  %t3 = load double* %t2, align 8
  ret double %t3
}

define float @foo4(float* nocapture inreg %p, i32 inreg %x) nounwind readonly {
  %t0 = lshr i32 %x, 8
  %t1 = and i32 %t0, 255
  %t2 = getelementptr float* %p, i32 %t1
  %t3 = load float* %t2, align 8
  ret float %t3
}

define i16 @foo2(i16* nocapture inreg %p, i32 inreg %x) nounwind readonly {
  %t0 = lshr i32 %x, 8
  %t1 = and i32 %t0, 255
  %t2 = getelementptr i16* %p, i32 %t1
  %t3 = load i16* %t2, align 8
  ret i16 %t3
}

define i8 @foo1(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
  %t0 = lshr i32 %x, 8
  %t1 = and i32 %t0, 255
  %t2 = getelementptr i8* %p, i32 %t1
  %t3 = load i8* %t2, align 8
  ret i8 %t3
}
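
; Use h-register extract with a scaled-offset address mode. In bar8,
; (x >> 5) & 2040 is ((x >> 8) & 255) * 8, so the index folds into a
; scale-8 address; bar4 and bar2 are the scale-4 and scale-2 variants.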
define i8 @bar8(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
  %t0 = lshr i32 %x, 5
  %t1 = and i32 %t0, 2040
  %t2 = getelementptr i8* %p, i32 %t1
  %t3 = load i8* %t2, align 8
  ret i8 %t3
}

define i8 @bar4(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
  %t0 = lshr i32 %x, 6
  %t1 = and i32 %t0, 1020
  %t2 = getelementptr i8* %p, i32 %t1
  %t3 = load i8* %t2, align 8
  ret i8 %t3
}

define i8 @bar2(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
  %t0 = lshr i32 %x, 7
  %t1 = and i32 %t0, 510
  %t2 = getelementptr i8* %p, i32 %t1
  %t3 = load i8* %t2, align 8
  ret i8 %t3
}