llvm-6502/test/CodeGen/X86/pmovsx-inreg.ll
David Blaikie 7c9c6ed761 [opaque pointer type] Add textual IR support for explicit type parameter to load instruction
Essentially the same as the GEP change in r230786.

A similar migration script can be used to update test cases, though a few more
test case improvements/changes were required this time around: (r229269-r229278)

import fileinput
import sys
import re

pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

for line in sys.stdin:
  sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))

Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@230794 91177308-0d34-0410-b5e6-96231b3b80d8
2015-02-27 21:17:42 +00:00

176 lines
4.7 KiB
LLVM

; RUN: llc < %s -march=x86-64 -mcpu=penryn | FileCheck -check-prefix=SSE41 %s
; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck -check-prefix=AVX1 %s
; RUN: llc < %s -march=x86-64 -mcpu=core-avx2 | FileCheck -check-prefix=AVX2 %s
; PR14887
; These tests inject a store into the chain to test the inreg versions of pmovsx
; Sign-extend a loaded <2 x i8> to <2 x i64>. The unrelated store to undef
; gives the load's chain a second user so the load cannot be folded into the
; extend, forcing selection of the register-form (inreg) sign-extension.
define void @test1(<2 x i8>* %in, <2 x i64>* %out) nounwind {
%wide.load35 = load <2 x i8>, <2 x i8>* %in, align 1
%sext = sext <2 x i8> %wide.load35 to <2 x i64>
; Chain-injecting store (see file header comment) to block load folding.
store <2 x i64> zeroinitializer, <2 x i64>* undef, align 8
store <2 x i64> %sext, <2 x i64>* %out, align 8
ret void
; SSE41-LABEL: test1:
; SSE41: pmovsxbq
; AVX1-LABEL: test1:
; AVX1: vpmovsxbq
; AVX2-LABEL: test1:
; AVX2: vpmovsxbq
}
; Sign-extend a loaded <4 x i8> to <4 x i64>; a 256-bit result, so only the
; AVX2 run line checks for a single-instruction extension here.
define void @test2(<4 x i8>* %in, <4 x i64>* %out) nounwind {
%wide.load35 = load <4 x i8>, <4 x i8>* %in, align 1
%sext = sext <4 x i8> %wide.load35 to <4 x i64>
; Chain-injecting store to block load folding into the extend.
store <4 x i64> zeroinitializer, <4 x i64>* undef, align 8
store <4 x i64> %sext, <4 x i64>* %out, align 8
ret void
; AVX2-LABEL: test2:
; AVX2: vpmovsxbq
}
; Sign-extend a loaded <4 x i8> to <4 x i32>; expects the byte-to-dword
; extension instruction in its register (inreg) form on all three targets.
define void @test3(<4 x i8>* %in, <4 x i32>* %out) nounwind {
%wide.load35 = load <4 x i8>, <4 x i8>* %in, align 1
%sext = sext <4 x i8> %wide.load35 to <4 x i32>
; Chain-injecting store to block load folding into the extend.
store <4 x i32> zeroinitializer, <4 x i32>* undef, align 8
store <4 x i32> %sext, <4 x i32>* %out, align 8
ret void
; SSE41-LABEL: test3:
; SSE41: pmovsxbd
; AVX1-LABEL: test3:
; AVX1: vpmovsxbd
; AVX2-LABEL: test3:
; AVX2: vpmovsxbd
}
; Sign-extend a loaded <8 x i8> to <8 x i32>; 256-bit result, so only the
; AVX2 run line checks for a single-instruction extension.
define void @test4(<8 x i8>* %in, <8 x i32>* %out) nounwind {
%wide.load35 = load <8 x i8>, <8 x i8>* %in, align 1
%sext = sext <8 x i8> %wide.load35 to <8 x i32>
; Chain-injecting store to block load folding into the extend.
store <8 x i32> zeroinitializer, <8 x i32>* undef, align 8
store <8 x i32> %sext, <8 x i32>* %out, align 8
ret void
; AVX2-LABEL: test4:
; AVX2: vpmovsxbd
}
; Sign-extend a loaded <8 x i8> to <8 x i16>; expects the byte-to-word
; extension instruction in its register (inreg) form on all three targets.
define void @test5(<8 x i8>* %in, <8 x i16>* %out) nounwind {
%wide.load35 = load <8 x i8>, <8 x i8>* %in, align 1
%sext = sext <8 x i8> %wide.load35 to <8 x i16>
; Chain-injecting store to block load folding into the extend.
store <8 x i16> zeroinitializer, <8 x i16>* undef, align 8
store <8 x i16> %sext, <8 x i16>* %out, align 8
ret void
; SSE41-LABEL: test5:
; SSE41: pmovsxbw
; AVX1-LABEL: test5:
; AVX1: vpmovsxbw
; AVX2-LABEL: test5:
; AVX2: vpmovsxbw
}
; Sign-extend a loaded <16 x i8> to <16 x i16>; 256-bit result, so only the
; AVX2 run line checks for a single-instruction extension.
define void @test6(<16 x i8>* %in, <16 x i16>* %out) nounwind {
%wide.load35 = load <16 x i8>, <16 x i8>* %in, align 1
%sext = sext <16 x i8> %wide.load35 to <16 x i16>
; Chain-injecting store to block load folding into the extend.
store <16 x i16> zeroinitializer, <16 x i16>* undef, align 8
store <16 x i16> %sext, <16 x i16>* %out, align 8
ret void
; AVX2-LABEL: test6:
; AVX2: vpmovsxbw
}
; Sign-extend a loaded <2 x i16> to <2 x i64>; expects the word-to-qword
; extension instruction in its register (inreg) form on all three targets.
define void @test7(<2 x i16>* %in, <2 x i64>* %out) nounwind {
%wide.load35 = load <2 x i16>, <2 x i16>* %in, align 1
%sext = sext <2 x i16> %wide.load35 to <2 x i64>
; Chain-injecting store to block load folding into the extend.
store <2 x i64> zeroinitializer, <2 x i64>* undef, align 8
store <2 x i64> %sext, <2 x i64>* %out, align 8
ret void
; SSE41-LABEL: test7:
; SSE41: pmovsxwq
; AVX1-LABEL: test7:
; AVX1: vpmovsxwq
; AVX2-LABEL: test7:
; AVX2: vpmovsxwq
}
; Sign-extend a loaded <4 x i16> to <4 x i64>; 256-bit result, so only the
; AVX2 run line checks for a single-instruction extension.
define void @test8(<4 x i16>* %in, <4 x i64>* %out) nounwind {
%wide.load35 = load <4 x i16>, <4 x i16>* %in, align 1
%sext = sext <4 x i16> %wide.load35 to <4 x i64>
; Chain-injecting store to block load folding into the extend.
store <4 x i64> zeroinitializer, <4 x i64>* undef, align 8
store <4 x i64> %sext, <4 x i64>* %out, align 8
ret void
; AVX2-LABEL: test8:
; AVX2: vpmovsxwq
}
; Sign-extend a loaded <4 x i16> to <4 x i32>; expects the word-to-dword
; extension instruction in its register (inreg) form on all three targets.
define void @test9(<4 x i16>* %in, <4 x i32>* %out) nounwind {
%wide.load35 = load <4 x i16>, <4 x i16>* %in, align 1
%sext = sext <4 x i16> %wide.load35 to <4 x i32>
; Chain-injecting store to block load folding into the extend.
store <4 x i32> zeroinitializer, <4 x i32>* undef, align 8
store <4 x i32> %sext, <4 x i32>* %out, align 8
ret void
; SSE41-LABEL: test9:
; SSE41: pmovsxwd
; AVX1-LABEL: test9:
; AVX1: vpmovsxwd
; AVX2-LABEL: test9:
; AVX2: vpmovsxwd
}
; Sign-extend a loaded <8 x i16> to <8 x i32>; 256-bit result, so only the
; AVX2 run line checks for a single-instruction extension.
define void @test10(<8 x i16>* %in, <8 x i32>* %out) nounwind {
%wide.load35 = load <8 x i16>, <8 x i16>* %in, align 1
%sext = sext <8 x i16> %wide.load35 to <8 x i32>
; Chain-injecting store to block load folding into the extend.
store <8 x i32> zeroinitializer, <8 x i32>* undef, align 8
store <8 x i32> %sext, <8 x i32>* %out, align 8
ret void
; AVX2-LABEL: test10:
; AVX2: vpmovsxwd
}
; Sign-extend a loaded <2 x i32> to <2 x i64>; expects the dword-to-qword
; extension instruction in its register (inreg) form on all three targets.
define void @test11(<2 x i32>* %in, <2 x i64>* %out) nounwind {
%wide.load35 = load <2 x i32>, <2 x i32>* %in, align 1
%sext = sext <2 x i32> %wide.load35 to <2 x i64>
; Chain-injecting store to block load folding into the extend.
store <2 x i64> zeroinitializer, <2 x i64>* undef, align 8
store <2 x i64> %sext, <2 x i64>* %out, align 8
ret void
; SSE41-LABEL: test11:
; SSE41: pmovsxdq
; AVX1-LABEL: test11:
; AVX1: vpmovsxdq
; AVX2-LABEL: test11:
; AVX2: vpmovsxdq
}
; Sign-extend a loaded <4 x i32> to <4 x i64>; 256-bit result, so only the
; AVX2 run line checks for a single-instruction extension.
define void @test12(<4 x i32>* %in, <4 x i64>* %out) nounwind {
%wide.load35 = load <4 x i32>, <4 x i32>* %in, align 1
%sext = sext <4 x i32> %wide.load35 to <4 x i64>
; Chain-injecting store to block load folding into the extend.
store <4 x i64> zeroinitializer, <4 x i64>* undef, align 8
store <4 x i64> %sext, <4 x i64>* %out, align 8
ret void
; AVX2-LABEL: test12:
; AVX2: vpmovsxdq
}