7c9c6ed761

Essentially the same as the GEP change in r230786.

A similar migration script can be used to update test cases, though a few
more test case improvements/changes were required this time around:
(r229269-r229278)

    import fileinput
    import sys
    import re

    pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

    for line in sys.stdin:
      sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))

Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@230794 91177308-0d34-0410-b5e6-96231b3b80d8
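For illustration, here is a minimal, self-contained sketch of what that regex
does to one pre-migration line of this test. The standalone snippet and the
`old` variable are hypothetical scaffolding, not part of the commit; only the
pattern and replacement come from the script above:

    import re

    # The migration pattern from the script above, verbatim.
    pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

    # Old single-type load syntax, as this line looked before the change.
    old = "  %1 = load <16 x i8>* %a, align 1"

    # Prints the explicit-type form seen throughout the test below:
    #   %1 = load <16 x i8>, <16 x i8>* %a, align 1
    print(re.sub(pat, r"\1, \2\3*\4", old))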
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 | FileCheck %s

define <16 x i16> @test_llvm_x86_avx2_pmovsxbw(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbw
; CHECK: vpmovsxbw (%rdi), %ymm0
  %1 = load <16 x i8>, <16 x i8>* %a, align 1
  %2 = call <16 x i16> @llvm.x86.avx2.pmovsxbw(<16 x i8> %1)
  ret <16 x i16> %2
}

define <8 x i32> @test_llvm_x86_avx2_pmovsxbd(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbd
; CHECK: vpmovsxbd (%rdi), %ymm0
  %1 = load <16 x i8>, <16 x i8>* %a, align 1
  %2 = call <8 x i32> @llvm.x86.avx2.pmovsxbd(<16 x i8> %1)
  ret <8 x i32> %2
}

define <4 x i64> @test_llvm_x86_avx2_pmovsxbq(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxbq
; CHECK: vpmovsxbq (%rdi), %ymm0
  %1 = load <16 x i8>, <16 x i8>* %a, align 1
  %2 = call <4 x i64> @llvm.x86.avx2.pmovsxbq(<16 x i8> %1)
  ret <4 x i64> %2
}

define <8 x i32> @test_llvm_x86_avx2_pmovsxwd(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxwd
; CHECK: vpmovsxwd (%rdi), %ymm0
  %1 = load <8 x i16>, <8 x i16>* %a, align 1
  %2 = call <8 x i32> @llvm.x86.avx2.pmovsxwd(<8 x i16> %1)
  ret <8 x i32> %2
}

define <4 x i64> @test_llvm_x86_avx2_pmovsxwq(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxwq
; CHECK: vpmovsxwq (%rdi), %ymm0
  %1 = load <8 x i16>, <8 x i16>* %a, align 1
  %2 = call <4 x i64> @llvm.x86.avx2.pmovsxwq(<8 x i16> %1)
  ret <4 x i64> %2
}

define <4 x i64> @test_llvm_x86_avx2_pmovsxdq(<4 x i32>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovsxdq
; CHECK: vpmovsxdq (%rdi), %ymm0
  %1 = load <4 x i32>, <4 x i32>* %a, align 1
  %2 = call <4 x i64> @llvm.x86.avx2.pmovsxdq(<4 x i32> %1)
  ret <4 x i64> %2
}

define <16 x i16> @test_llvm_x86_avx2_pmovzxbw(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbw
; CHECK: vpmovzxbw (%rdi), %ymm0
  %1 = load <16 x i8>, <16 x i8>* %a, align 1
  %2 = call <16 x i16> @llvm.x86.avx2.pmovzxbw(<16 x i8> %1)
  ret <16 x i16> %2
}

define <8 x i32> @test_llvm_x86_avx2_pmovzxbd(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbd
; CHECK: vpmovzxbd (%rdi), %ymm0
  %1 = load <16 x i8>, <16 x i8>* %a, align 1
  %2 = call <8 x i32> @llvm.x86.avx2.pmovzxbd(<16 x i8> %1)
  ret <8 x i32> %2
}

define <4 x i64> @test_llvm_x86_avx2_pmovzxbq(<16 x i8>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxbq
; CHECK: vpmovzxbq (%rdi), %ymm0
  %1 = load <16 x i8>, <16 x i8>* %a, align 1
  %2 = call <4 x i64> @llvm.x86.avx2.pmovzxbq(<16 x i8> %1)
  ret <4 x i64> %2
}

define <8 x i32> @test_llvm_x86_avx2_pmovzxwd(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxwd
; CHECK: vpmovzxwd (%rdi), %ymm0
  %1 = load <8 x i16>, <8 x i16>* %a, align 1
  %2 = call <8 x i32> @llvm.x86.avx2.pmovzxwd(<8 x i16> %1)
  ret <8 x i32> %2
}

define <4 x i64> @test_llvm_x86_avx2_pmovzxwq(<8 x i16>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxwq
; CHECK: vpmovzxwq (%rdi), %ymm0
  %1 = load <8 x i16>, <8 x i16>* %a, align 1
  %2 = call <4 x i64> @llvm.x86.avx2.pmovzxwq(<8 x i16> %1)
  ret <4 x i64> %2
}

define <4 x i64> @test_llvm_x86_avx2_pmovzxdq(<4 x i32>* %a) {
; CHECK-LABEL: test_llvm_x86_avx2_pmovzxdq
; CHECK: vpmovzxdq (%rdi), %ymm0
  %1 = load <4 x i32>, <4 x i32>* %a, align 1
  %2 = call <4 x i64> @llvm.x86.avx2.pmovzxdq(<4 x i32> %1)
  ret <4 x i64> %2
}

declare <4 x i64> @llvm.x86.avx2.pmovzxdq(<4 x i32>)
declare <4 x i64> @llvm.x86.avx2.pmovzxwq(<8 x i16>)
declare <8 x i32> @llvm.x86.avx2.pmovzxwd(<8 x i16>)
declare <4 x i64> @llvm.x86.avx2.pmovzxbq(<16 x i8>)
declare <8 x i32> @llvm.x86.avx2.pmovzxbd(<16 x i8>)
declare <16 x i16> @llvm.x86.avx2.pmovzxbw(<16 x i8>)
declare <4 x i64> @llvm.x86.avx2.pmovsxdq(<4 x i32>)
declare <4 x i64> @llvm.x86.avx2.pmovsxwq(<8 x i16>)
declare <8 x i32> @llvm.x86.avx2.pmovsxwd(<8 x i16>)
declare <4 x i64> @llvm.x86.avx2.pmovsxbq(<16 x i8>)
declare <8 x i32> @llvm.x86.avx2.pmovsxbd(<16 x i8>)
declare <16 x i16> @llvm.x86.avx2.pmovsxbw(<16 x i8>)