Mirror of https://github.com/c64scene-ar/llvm-6502.git
Commit 1f65cfad96

Generalize the AArch64 .td nodes for AssertZext and AssertSext. Use them to
match the relevant pextr store instructions. The test widen_load-2.ll requires
a slight change because with the stores gone, the remaining instructions are
scheduled in a different order. Add test cases for SSE4 and AVX variants.

Resolves rdar://13414672.

Patch by Adam Nemet <anemet@apple.com>.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@200957 91177308-0d34-0410-b5e6-96231b3b80d8
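For context, a minimal illustrative sketch (not part of the commit itself): the pattern being matched is an extractelement whose only use is a narrow store, which SSE4.1/AVX can lower to a single extract-to-memory instruction instead of an extract into a general-purpose register followed by a separate byte store. Roughly, for lane 15 of a <16 x i8> argument passed in %xmm0 with the destination pointer in %rdi:

    ; IR input:
    ;   %vecext = extractelement <16 x i8> %foo, i32 15
    ;   store i8 %vecext, i8* %dst, align 1
    ;
    ; without the new patterns (illustrative):
    ;   pextrb $15, %xmm0, %eax
    ;   movb   %al, (%rdi)
    ;
    ; with the new patterns (illustrative):
    ;   pextrb $15, %xmm0, (%rdi)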
23 lines · 646 B · LLVM
; RUN: llc < %s -o - -mcpu=generic -march=x86-64 -mattr=+sse4.1 | FileCheck %s -check-prefix=SSE41
; RUN: llc < %s -o - -mcpu=generic -march=x86-64 -mattr=+avx | FileCheck %s -check-prefix=AVX

define void @pextrb(i8* nocapture %dst, <16 x i8> %foo) {
; AVX: vpextrb
; SSE41: pextrb
; AVX-NOT: movb
; SSE41-NOT: movb
  %vecext = extractelement <16 x i8> %foo, i32 15
  store i8 %vecext, i8* %dst, align 1
  ret void
}

define void @pextrw(i16* nocapture %dst, <8 x i16> %foo) {
; AVX: vpextrw
; SSE41: pextrw
; AVX-NOT: movw
; SSE41-NOT: movw
  %vecext = extractelement <8 x i16> %foo, i32 7
  store i16 %vecext, i16* %dst, align 1
  ret void
}
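Outside of lit, the RUN lines can be reproduced by hand; %s simply expands to the path of the test file. A minimal sketch, assuming the file is saved locally as extract-store.ll (name is a placeholder) and that llc and FileCheck from an LLVM build are on PATH:

    llc < extract-store.ll -o - -mcpu=generic -march=x86-64 -mattr=+sse4.1 | FileCheck extract-store.ll -check-prefix=SSE41
    llc < extract-store.ll -o - -mcpu=generic -march=x86-64 -mattr=+avx | FileCheck extract-store.ll -check-prefix=AVX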