Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2024-11-01 15:11:24 +00:00)
Commit 21e3dfbc86
- Add patterns for h-register extract, which avoids a shift and mask, and in some cases a temporary register.
- Add address-mode matching for turning (X>>(8-n))&(255<<n), where n is a valid address-mode scale value, into an h-register extract and a scaled-offset address.
- Replace X86's MOV32to32_ and related instructions with the new target-independent COPY_TO_SUBREG instruction.

On x86-64 there are complicated constraints on h registers, and CodeGen doesn't currently provide a high-level way to express all of them, so they are handled with a bunch of special code. This code currently only supports extracts where the result is used by a zero-extend or a store, though these are fairly common.

These transformations are not always beneficial; since there are only 4 h registers, they sometimes require extra move instructions, and this sometimes increases register pressure because it can force out values that would otherwise be in one of those registers. However, this appears to be relatively uncommon.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@68962 91177308-0d34-0410-b5e6-96231b3b80d8
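For illustration only (this sketch is not part of the commit or of the test below; the function name @hreg_scaled_index and the choice n = 2 are assumptions), IR of the following shape matches the (X>>(8-n))&(255<<n) pattern described above. With n = 2, the expression (%x >> 6) & 1020 equals ((%x >> 8) & 255) * 4, i.e. the h-register byte of %x scaled by 4, which can serve as a scaled-offset address:

; Hypothetical sketch: n = 2, so 8-n = 6 and 255<<n = 1020. The masked
; shift below is the h-register byte scaled by 4, used as an index.
define i8 @hreg_scaled_index(i32 inreg %x, i8* inreg %base) nounwind {
  %sh = lshr i32 %x, 6
  %idx = and i32 %sh, 1020
  %p = getelementptr i8* %base, i32 %idx
  %v = load i8* %p
  ret i8 %v
}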
28 lines
691 B
LLVM
; RUN: llvm-as < %s | llc -march=x86-64 > %t
; RUN: grep mov %t | count 6
; RUN: grep {movb %ah, (%rsi)} %t | count 3
; RUN: llvm-as < %s | llc -march=x86 > %t
; RUN: grep mov %t | count 3
; RUN: grep {movb %ah, (%e} %t | count 3

; Use h-register extract and store.

define void @foo16(i16 inreg %p, i8* inreg %z) nounwind {
  %q = lshr i16 %p, 8
  %t = trunc i16 %q to i8
  store i8 %t, i8* %z
  ret void
}

define void @foo32(i32 inreg %p, i8* inreg %z) nounwind {
  %q = lshr i32 %p, 8
  %t = trunc i32 %q to i8
  store i8 %t, i8* %z
  ret void
}

define void @foo64(i64 inreg %p, i8* inreg %z) nounwind {
  %q = lshr i64 %p, 8
  %t = trunc i64 %q to i8
  store i8 %t, i8* %z
  ret void
}
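The test above only exercises the store form of the extract. As a hedged companion sketch (not part of the original test; the function name @bar32 is made up), the zero-extend form mentioned in the commit message would look like this in IR:

; Hypothetical companion case: the extracted byte feeds a zero-extend rather
; than a store, the other use the commit message says is supported. On x86
; this would be expected to select an h-register move such as movzbl %ah, %eax,
; though that expectation is not verified here.
define i32 @bar32(i32 inreg %p) nounwind {
  %q = lshr i32 %p, 8
  %t = trunc i32 %q to i8
  %z = zext i8 %t to i32
  ret i32 %z
}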