Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2024-12-17 18:31:04 +00:00
[x86] Regenerate this test case now that I've improved my script for generating the test cases to format things more consistently and actually catch all the operand sequences that should be elided in favor of the asm comments. No actual changes here.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@218210 91177308-0d34-0410-b5e6-96231b3b80d8
commit e4cb9d5f25
parent fdaf59e9b1
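What the regeneration changes, concretely: CHECK lines that previously spelled out each immediate and register operand now elide them behind FileCheck's {{.*}} regex and instead rely on the shuffle-decode comment the X86 assembly printer emits after '#' to pin down the lane mapping. A minimal before/after sketch, taken verbatim from the first hunk below:

; old form: operands written out explicitly
; ALL-NEXT: vblendps $85, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
; new form: operands elided in favor of the asm comment
; ALL-NEXT: vblendps {{.*}} # ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]

Both forms match the same vblendps instruction, but the new one keys only on the decoded lane mapping in the comment rather than also repeating the raw immediate and operand list, which is the formatting cleanup described in the log message above.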
@@ -135,7 +135,7 @@ define <8 x float> @shuffle_v8f32_00001111(<8 x float> %a, <8 x float> %b) {
 define <8 x float> @shuffle_v8f32_81a3c5e7(<8 x float> %a, <8 x float> %b) {
 ; ALL-LABEL: @shuffle_v8f32_81a3c5e7
 ; ALL: # BB#0:
-; ALL-NEXT: vblendps $85, %ymm1, %ymm0, %ymm0 # ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
+; ALL-NEXT: vblendps {{.*}} # ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[3],ymm1[4],ymm0[5],ymm1[6],ymm0[7]
 ; ALL-NEXT: retq
 %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
 ret <8 x float> %shuffle
@@ -146,7 +146,7 @@ define <8 x float> @shuffle_v8f32_08080808(<8 x float> %a, <8 x float> %b) {
 ; ALL: # BB#0:
 ; ALL-NEXT: vpermilps {{.*}} # xmm1 = xmm1[0,0,2,0]
 ; ALL-NEXT: vpermilps {{.*}} # xmm0 = xmm0[0,1,0,3]
-; ALL-NEXT: vblendps $10, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; ALL-NEXT: vblendps {{.*}} # xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; ALL-NEXT: retq
 %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 8, i32 0, i32 8, i32 0, i32 8, i32 0, i32 8>
@@ -164,7 +164,7 @@ define <8 x float> @shuffle_v8f32_08084c4c(<8 x float> %a, <8 x float> %b) {
 ; ALL-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; ALL-NEXT: vpermilps {{.*}} # xmm0 = xmm0[0,1,0,3]
 ; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
-; ALL-NEXT: vblendps $-86, %ymm1, %ymm0, %ymm0 # ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; ALL-NEXT: vblendps {{.*}} # ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
 ; ALL-NEXT: retq
 %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 8, i32 0, i32 8, i32 4, i32 12, i32 4, i32 12>
 ret <8 x float> %shuffle
@@ -222,10 +222,10 @@ define <8 x float> @shuffle_v8f32_08192a3b(<8 x float> %a, <8 x float> %b) {
 ; ALL: # BB#0:
 ; ALL-NEXT: vpermilps {{.*}} # xmm2 = xmm1[0,2,2,3]
 ; ALL-NEXT: vpermilps {{.*}} # xmm3 = xmm0[2,1,3,3]
-; ALL-NEXT: vblendps $10, %xmm2, %xmm3, %xmm2 # xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
+; ALL-NEXT: vblendps {{.*}} # xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
 ; ALL-NEXT: vpermilps {{.*}} # xmm1 = xmm1[0,0,2,1]
 ; ALL-NEXT: vpermilps {{.*}} # xmm0 = xmm0[0,1,1,3]
-; ALL-NEXT: vblendps $10, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; ALL-NEXT: vblendps {{.*}} # xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; ALL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; ALL-NEXT: retq
 %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
@@ -237,9 +237,9 @@ define <8 x float> @shuffle_v8f32_08991abb(<8 x float> %a, <8 x float> %b) {
 ; ALL: # BB#0:
 ; ALL-NEXT: vpermilps {{.*}} # xmm2 = xmm0[1,1,2,3]
 ; ALL-NEXT: vpermilps {{.*}} # xmm3 = xmm1[0,2,3,3]
-; ALL-NEXT: vblendps $1, %xmm2, %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[1,2,3]
+; ALL-NEXT: vblendps {{.*}} # xmm2 = xmm2[0],xmm3[1,2,3]
 ; ALL-NEXT: vunpcklps {{.*}} # xmm1 = xmm1[0,0,1,1]
-; ALL-NEXT: vblendps $1, %xmm0, %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[1,2,3]
+; ALL-NEXT: vblendps {{.*}} # xmm0 = xmm0[0],xmm1[1,2,3]
 ; ALL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; ALL-NEXT: retq
 %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 8, i32 9, i32 9, i32 1, i32 10, i32 11, i32 11>
@@ -251,9 +251,9 @@ define <8 x float> @shuffle_v8f32_091b2d3f(<8 x float> %a, <8 x float> %b) {
 ; ALL: # BB#0:
 ; ALL-NEXT: vextractf128 $1, %ymm1, %xmm2
 ; ALL-NEXT: vpermilps {{.*}} # xmm3 = xmm0[2,1,3,3]
-; ALL-NEXT: vblendps $10, %xmm2, %xmm3, %xmm2 # xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
+; ALL-NEXT: vblendps {{.*}} # xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
 ; ALL-NEXT: vpermilps {{.*}} # xmm0 = xmm0[0,1,1,3]
-; ALL-NEXT: vblendps $10, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; ALL-NEXT: vblendps {{.*}} # xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
 ; ALL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; ALL-NEXT: retq
 %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 9, i32 1, i32 11, i32 2, i32 13, i32 3, i32 15>
@@ -265,8 +265,8 @@ define <8 x float> @shuffle_v8f32_09ab1def(<8 x float> %a, <8 x float> %b) {
 ; ALL: # BB#0:
 ; ALL-NEXT: vextractf128 $1, %ymm1, %xmm2
 ; ALL-NEXT: vpermilps {{.*}} # xmm3 = xmm0[1,1,2,3]
-; ALL-NEXT: vblendps $1, %xmm3, %xmm2, %xmm2 # xmm2 = xmm3[0],xmm2[1,2,3]
-; ALL-NEXT: vblendps $1, %xmm0, %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[1,2,3]
+; ALL-NEXT: vblendps {{.*}} # xmm2 = xmm3[0],xmm2[1,2,3]
+; ALL-NEXT: vblendps {{.*}} # xmm0 = xmm0[0],xmm1[1,2,3]
 ; ALL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; ALL-NEXT: retq
 %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 1, i32 13, i32 14, i32 15>