Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2024-11-01 15:11:24 +00:00)
Fix undefined behavior in vector shift tests.
These were all shifting by the same amount as the bitwidth.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@203519 91177308-0d34-0410-b5e6-96231b3b80d8
parent bea18e3849
commit 53131629dc
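Background: in LLVM IR, a shift whose amount is equal to or larger than the bit width of the shifted type has an undefined result, so the optimizer may fold it to anything; that is why the old x86 checks below expect a zeroing vxorps/xorps rather than a real shift instruction. The sketch below is illustrative only and is not part of the commit; the function names are made up.

; Shift amount equals the bit width: the result is undefined (the bug these tests had).
define <2 x i32> @shift_by_bitwidth(<2 x i32> %a) {
  %r = lshr <2 x i32> %a, <i32 32, i32 32>
  ret <2 x i32> %r
}

; Shift amount is bit width minus one: the largest well-defined shift.
define <2 x i32> @shift_by_bitwidth_minus_one(<2 x i32> %a) {
  %r = lshr <2 x i32> %a, <i32 31, i32 31>
  ret <2 x i32> %r
}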
@@ -306,8 +306,8 @@ define <1 x i16> @ashr.v1i16.imm(<1 x i16> %a) {
 define <1 x i32> @ashr.v1i32.imm(<1 x i32> %a) {
 ; CHECK-LABEL: ashr.v1i32.imm:
-; CHECK: sshr v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, #32
-%c = ashr <1 x i32> %a, <i32 32>
+; CHECK: sshr v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, #31
+%c = ashr <1 x i32> %a, <i32 31>
 ret <1 x i32> %c
 }
@@ -327,7 +327,7 @@ define <1 x i16> @lshr.v1i16.imm(<1 x i16> %a) {
 define <1 x i32> @lshr.v1i32.imm(<1 x i32> %a) {
 ; CHECK-LABEL: lshr.v1i32.imm:
-; CHECK: ushr v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, #32
-%c = lshr <1 x i32> %a, <i32 32>
+; CHECK: ushr v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, #31
+%c = lshr <1 x i32> %a, <i32 31>
 ret <1 x i32> %c
 }
@@ -180,7 +180,7 @@ define <8 x i8> @vlshri8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vlshri8:
 ;CHECK: vshr.u8
 %tmp1 = load <8 x i8>* %A
-%tmp2 = lshr <8 x i8> %tmp1, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+%tmp2 = lshr <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
 ret <8 x i8> %tmp2
 }
@@ -188,7 +188,7 @@ define <4 x i16> @vlshri16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vlshri16:
 ;CHECK: vshr.u16
 %tmp1 = load <4 x i16>* %A
-%tmp2 = lshr <4 x i16> %tmp1, < i16 16, i16 16, i16 16, i16 16 >
+%tmp2 = lshr <4 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15 >
 ret <4 x i16> %tmp2
 }
@@ -196,7 +196,7 @@ define <2 x i32> @vlshri32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vlshri32:
 ;CHECK: vshr.u32
 %tmp1 = load <2 x i32>* %A
-%tmp2 = lshr <2 x i32> %tmp1, < i32 32, i32 32 >
+%tmp2 = lshr <2 x i32> %tmp1, < i32 31, i32 31 >
 ret <2 x i32> %tmp2
 }
@@ -204,7 +204,7 @@ define <1 x i64> @vlshri64(<1 x i64>* %A) nounwind {
 ;CHECK-LABEL: vlshri64:
 ;CHECK: vshr.u64
 %tmp1 = load <1 x i64>* %A
-%tmp2 = lshr <1 x i64> %tmp1, < i64 64 >
+%tmp2 = lshr <1 x i64> %tmp1, < i64 63 >
 ret <1 x i64> %tmp2
 }
@@ -252,7 +252,7 @@ define <16 x i8> @vlshrQi8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vlshrQi8:
 ;CHECK: vshr.u8
 %tmp1 = load <16 x i8>* %A
-%tmp2 = lshr <16 x i8> %tmp1, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+%tmp2 = lshr <16 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
 ret <16 x i8> %tmp2
 }
@@ -260,7 +260,7 @@ define <8 x i16> @vlshrQi16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vlshrQi16:
 ;CHECK: vshr.u16
 %tmp1 = load <8 x i16>* %A
-%tmp2 = lshr <8 x i16> %tmp1, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
+%tmp2 = lshr <8 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
 ret <8 x i16> %tmp2
 }
@@ -268,7 +268,7 @@ define <4 x i32> @vlshrQi32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vlshrQi32:
 ;CHECK: vshr.u32
 %tmp1 = load <4 x i32>* %A
-%tmp2 = lshr <4 x i32> %tmp1, < i32 32, i32 32, i32 32, i32 32 >
+%tmp2 = lshr <4 x i32> %tmp1, < i32 31, i32 31, i32 31, i32 31 >
 ret <4 x i32> %tmp2
 }
@@ -276,7 +276,7 @@ define <2 x i64> @vlshrQi64(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vlshrQi64:
 ;CHECK: vshr.u64
 %tmp1 = load <2 x i64>* %A
-%tmp2 = lshr <2 x i64> %tmp1, < i64 64, i64 64 >
+%tmp2 = lshr <2 x i64> %tmp1, < i64 63, i64 63 >
 ret <2 x i64> %tmp2
 }
@@ -331,7 +331,7 @@ define <8 x i8> @vashri8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vashri8:
 ;CHECK: vshr.s8
 %tmp1 = load <8 x i8>* %A
-%tmp2 = ashr <8 x i8> %tmp1, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+%tmp2 = ashr <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
 ret <8 x i8> %tmp2
 }
@@ -339,7 +339,7 @@ define <4 x i16> @vashri16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vashri16:
 ;CHECK: vshr.s16
 %tmp1 = load <4 x i16>* %A
-%tmp2 = ashr <4 x i16> %tmp1, < i16 16, i16 16, i16 16, i16 16 >
+%tmp2 = ashr <4 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15 >
 ret <4 x i16> %tmp2
 }
@@ -347,7 +347,7 @@ define <2 x i32> @vashri32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vashri32:
 ;CHECK: vshr.s32
 %tmp1 = load <2 x i32>* %A
-%tmp2 = ashr <2 x i32> %tmp1, < i32 32, i32 32 >
+%tmp2 = ashr <2 x i32> %tmp1, < i32 31, i32 31 >
 ret <2 x i32> %tmp2
 }
@@ -355,7 +355,7 @@ define <1 x i64> @vashri64(<1 x i64>* %A) nounwind {
 ;CHECK-LABEL: vashri64:
 ;CHECK: vshr.s64
 %tmp1 = load <1 x i64>* %A
-%tmp2 = ashr <1 x i64> %tmp1, < i64 64 >
+%tmp2 = ashr <1 x i64> %tmp1, < i64 63 >
 ret <1 x i64> %tmp2
 }
@@ -403,7 +403,7 @@ define <16 x i8> @vashrQi8(<16 x i8>* %A) nounwind {
 ;CHECK-LABEL: vashrQi8:
 ;CHECK: vshr.s8
 %tmp1 = load <16 x i8>* %A
-%tmp2 = ashr <16 x i8> %tmp1, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+%tmp2 = ashr <16 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
 ret <16 x i8> %tmp2
 }
@@ -411,7 +411,7 @@ define <8 x i16> @vashrQi16(<8 x i16>* %A) nounwind {
 ;CHECK-LABEL: vashrQi16:
 ;CHECK: vshr.s16
 %tmp1 = load <8 x i16>* %A
-%tmp2 = ashr <8 x i16> %tmp1, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
+%tmp2 = ashr <8 x i16> %tmp1, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
 ret <8 x i16> %tmp2
 }
@@ -419,7 +419,7 @@ define <4 x i32> @vashrQi32(<4 x i32>* %A) nounwind {
 ;CHECK-LABEL: vashrQi32:
 ;CHECK: vshr.s32
 %tmp1 = load <4 x i32>* %A
-%tmp2 = ashr <4 x i32> %tmp1, < i32 32, i32 32, i32 32, i32 32 >
+%tmp2 = ashr <4 x i32> %tmp1, < i32 31, i32 31, i32 31, i32 31 >
 ret <4 x i32> %tmp2
 }
@@ -427,6 +427,6 @@ define <2 x i64> @vashrQi64(<2 x i64>* %A) nounwind {
 ;CHECK-LABEL: vashrQi64:
 ;CHECK: vshr.s64
 %tmp1 = load <2 x i64>* %A
-%tmp2 = ashr <2 x i64> %tmp1, < i64 64, i64 64 >
+%tmp2 = ashr <2 x i64> %tmp1, < i64 63, i64 63 >
 ret <2 x i64> %tmp2
 }
@@ -5,8 +5,8 @@ define <8 x i8> @vsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vsra.s8
 %tmp1 = load <8 x i8>* %A
 %tmp2 = load <8 x i8>* %B
-%tmp3 = ashr <8 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
-%tmp4 = add <8 x i8> %tmp1, %tmp3
+%tmp3 = ashr <8 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
+%tmp4 = add <8 x i8> %tmp1, %tmp3
 ret <8 x i8> %tmp4
 }
@@ -15,7 +15,7 @@ define <4 x i16> @vsras16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK: vsra.s16
 %tmp1 = load <4 x i16>* %A
 %tmp2 = load <4 x i16>* %B
-%tmp3 = ashr <4 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16 >
+%tmp3 = ashr <4 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15 >
 %tmp4 = add <4 x i16> %tmp1, %tmp3
 ret <4 x i16> %tmp4
 }
@@ -25,7 +25,7 @@ define <2 x i32> @vsras32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK: vsra.s32
 %tmp1 = load <2 x i32>* %A
 %tmp2 = load <2 x i32>* %B
-%tmp3 = ashr <2 x i32> %tmp2, < i32 32, i32 32 >
+%tmp3 = ashr <2 x i32> %tmp2, < i32 31, i32 31 >
 %tmp4 = add <2 x i32> %tmp1, %tmp3
 ret <2 x i32> %tmp4
 }
@@ -35,7 +35,7 @@ define <1 x i64> @vsras64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK: vsra.s64
 %tmp1 = load <1 x i64>* %A
 %tmp2 = load <1 x i64>* %B
-%tmp3 = ashr <1 x i64> %tmp2, < i64 64 >
+%tmp3 = ashr <1 x i64> %tmp2, < i64 63 >
 %tmp4 = add <1 x i64> %tmp1, %tmp3
 ret <1 x i64> %tmp4
 }
@@ -45,7 +45,7 @@ define <16 x i8> @vsraQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK: vsra.s8
 %tmp1 = load <16 x i8>* %A
 %tmp2 = load <16 x i8>* %B
-%tmp3 = ashr <16 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+%tmp3 = ashr <16 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
 %tmp4 = add <16 x i8> %tmp1, %tmp3
 ret <16 x i8> %tmp4
 }
@@ -55,7 +55,7 @@ define <8 x i16> @vsraQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK: vsra.s16
 %tmp1 = load <8 x i16>* %A
 %tmp2 = load <8 x i16>* %B
-%tmp3 = ashr <8 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
+%tmp3 = ashr <8 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
 %tmp4 = add <8 x i16> %tmp1, %tmp3
 ret <8 x i16> %tmp4
 }
@@ -65,7 +65,7 @@ define <4 x i32> @vsraQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK: vsra.s32
 %tmp1 = load <4 x i32>* %A
 %tmp2 = load <4 x i32>* %B
-%tmp3 = ashr <4 x i32> %tmp2, < i32 32, i32 32, i32 32, i32 32 >
+%tmp3 = ashr <4 x i32> %tmp2, < i32 31, i32 31, i32 31, i32 31 >
 %tmp4 = add <4 x i32> %tmp1, %tmp3
 ret <4 x i32> %tmp4
 }
@@ -75,7 +75,7 @@ define <2 x i64> @vsraQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK: vsra.s64
 %tmp1 = load <2 x i64>* %A
 %tmp2 = load <2 x i64>* %B
-%tmp3 = ashr <2 x i64> %tmp2, < i64 64, i64 64 >
+%tmp3 = ashr <2 x i64> %tmp2, < i64 63, i64 63 >
 %tmp4 = add <2 x i64> %tmp1, %tmp3
 ret <2 x i64> %tmp4
 }
@@ -85,7 +85,7 @@ define <8 x i8> @vsrau8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ;CHECK: vsra.u8
 %tmp1 = load <8 x i8>* %A
 %tmp2 = load <8 x i8>* %B
-%tmp3 = lshr <8 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+%tmp3 = lshr <8 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
 %tmp4 = add <8 x i8> %tmp1, %tmp3
 ret <8 x i8> %tmp4
 }
@@ -95,7 +95,7 @@ define <4 x i16> @vsrau16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ;CHECK: vsra.u16
 %tmp1 = load <4 x i16>* %A
 %tmp2 = load <4 x i16>* %B
-%tmp3 = lshr <4 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16 >
+%tmp3 = lshr <4 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15 >
 %tmp4 = add <4 x i16> %tmp1, %tmp3
 ret <4 x i16> %tmp4
 }
@@ -105,7 +105,7 @@ define <2 x i32> @vsrau32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 ;CHECK: vsra.u32
 %tmp1 = load <2 x i32>* %A
 %tmp2 = load <2 x i32>* %B
-%tmp3 = lshr <2 x i32> %tmp2, < i32 32, i32 32 >
+%tmp3 = lshr <2 x i32> %tmp2, < i32 31, i32 31 >
 %tmp4 = add <2 x i32> %tmp1, %tmp3
 ret <2 x i32> %tmp4
 }
@@ -115,7 +115,7 @@ define <1 x i64> @vsrau64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
 ;CHECK: vsra.u64
 %tmp1 = load <1 x i64>* %A
 %tmp2 = load <1 x i64>* %B
-%tmp3 = lshr <1 x i64> %tmp2, < i64 64 >
+%tmp3 = lshr <1 x i64> %tmp2, < i64 63 >
 %tmp4 = add <1 x i64> %tmp1, %tmp3
 ret <1 x i64> %tmp4
 }
@@ -125,7 +125,7 @@ define <16 x i8> @vsraQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 ;CHECK: vsra.u8
 %tmp1 = load <16 x i8>* %A
 %tmp2 = load <16 x i8>* %B
-%tmp3 = lshr <16 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
+%tmp3 = lshr <16 x i8> %tmp2, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
 %tmp4 = add <16 x i8> %tmp1, %tmp3
 ret <16 x i8> %tmp4
 }
@@ -135,7 +135,7 @@ define <8 x i16> @vsraQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ;CHECK: vsra.u16
 %tmp1 = load <8 x i16>* %A
 %tmp2 = load <8 x i16>* %B
-%tmp3 = lshr <8 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
+%tmp3 = lshr <8 x i16> %tmp2, < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >
 %tmp4 = add <8 x i16> %tmp1, %tmp3
 ret <8 x i16> %tmp4
 }
@@ -145,7 +145,7 @@ define <4 x i32> @vsraQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 ;CHECK: vsra.u32
 %tmp1 = load <4 x i32>* %A
 %tmp2 = load <4 x i32>* %B
-%tmp3 = lshr <4 x i32> %tmp2, < i32 32, i32 32, i32 32, i32 32 >
+%tmp3 = lshr <4 x i32> %tmp2, < i32 31, i32 31, i32 31, i32 31 >
 %tmp4 = add <4 x i32> %tmp1, %tmp3
 ret <4 x i32> %tmp4
 }
@@ -155,7 +155,7 @@ define <2 x i64> @vsraQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
 ;CHECK: vsra.u64
 %tmp1 = load <2 x i64>* %A
 %tmp2 = load <2 x i64>* %B
-%tmp3 = lshr <2 x i64> %tmp2, < i64 64, i64 64 >
+%tmp3 = lshr <2 x i64> %tmp2, < i64 63, i64 63 >
 %tmp4 = add <2 x i64> %tmp1, %tmp3
 ret <2 x i64> %tmp4
 }
@@ -24,12 +24,12 @@ entry:
 define <16 x i16> @test_sllw_3(<16 x i16> %InVec) {
 entry:
-%shl = shl <16 x i16> %InVec, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+%shl = shl <16 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
 ret <16 x i16> %shl
 }
 
 ; CHECK-LABEL: test_sllw_3:
-; CHECK: vxorps %ymm0, %ymm0, %ymm0
+; CHECK: vpsllw $15, %ymm0, %ymm0
 ; CHECK: ret
 
 define <8 x i32> @test_slld_1(<8 x i32> %InVec) {
@@ -54,12 +54,12 @@ entry:
 define <8 x i32> @test_slld_3(<8 x i32> %InVec) {
 entry:
-%shl = shl <8 x i32> %InVec, <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
+%shl = shl <8 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
 ret <8 x i32> %shl
 }
 
 ; CHECK-LABEL: test_slld_3:
-; CHECK: vxorps %ymm0, %ymm0, %ymm0
+; CHECK: vpslld $31, %ymm0, %ymm0
 ; CHECK: ret
 
 define <4 x i64> @test_sllq_1(<4 x i64> %InVec) {
@@ -84,12 +84,12 @@ entry:
 define <4 x i64> @test_sllq_3(<4 x i64> %InVec) {
 entry:
-%shl = shl <4 x i64> %InVec, <i64 64, i64 64, i64 64, i64 64>
+%shl = shl <4 x i64> %InVec, <i64 63, i64 63, i64 63, i64 63>
 ret <4 x i64> %shl
 }
 
 ; CHECK-LABEL: test_sllq_3:
-; CHECK: vxorps %ymm0, %ymm0, %ymm0
+; CHECK: vpsllq $63, %ymm0, %ymm0
 ; CHECK: ret
 
 ; AVX2 Arithmetic Shift
@@ -116,7 +116,7 @@ entry:
 define <16 x i16> @test_sraw_3(<16 x i16> %InVec) {
 entry:
-%shl = ashr <16 x i16> %InVec, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+%shl = ashr <16 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
 ret <16 x i16> %shl
 }
@@ -146,7 +146,7 @@ entry:
 define <8 x i32> @test_srad_3(<8 x i32> %InVec) {
 entry:
-%shl = ashr <8 x i32> %InVec, <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
+%shl = ashr <8 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
 ret <8 x i32> %shl
 }
@@ -178,12 +178,12 @@ entry:
 define <16 x i16> @test_srlw_3(<16 x i16> %InVec) {
 entry:
-%shl = lshr <16 x i16> %InVec, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+%shl = lshr <16 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
 ret <16 x i16> %shl
 }
 
 ; CHECK-LABEL: test_srlw_3:
-; CHECK: vxorps %ymm0, %ymm0, %ymm0
+; CHECK: vpsrlw $15, %ymm0, %ymm0
 ; CHECK: ret
 
 define <8 x i32> @test_srld_1(<8 x i32> %InVec) {
@@ -208,12 +208,12 @@ entry:
 define <8 x i32> @test_srld_3(<8 x i32> %InVec) {
 entry:
-%shl = lshr <8 x i32> %InVec, <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>
+%shl = lshr <8 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
 ret <8 x i32> %shl
 }
 
 ; CHECK-LABEL: test_srld_3:
-; CHECK: vxorps %ymm0, %ymm0, %ymm0
+; CHECK: vpsrld $31, %ymm0, %ymm0
 ; CHECK: ret
 
 define <4 x i64> @test_srlq_1(<4 x i64> %InVec) {
@@ -238,10 +238,10 @@ entry:
 define <4 x i64> @test_srlq_3(<4 x i64> %InVec) {
 entry:
-%shl = lshr <4 x i64> %InVec, <i64 64, i64 64, i64 64, i64 64>
+%shl = lshr <4 x i64> %InVec, <i64 63, i64 63, i64 63, i64 63>
 ret <4 x i64> %shl
 }
 
 ; CHECK-LABEL: test_srlq_3:
-; CHECK: vxorps %ymm0, %ymm0, %ymm0
+; CHECK: vpsrlq $63, %ymm0, %ymm0
 ; CHECK: ret
@@ -24,12 +24,12 @@ entry:
 define <8 x i16> @test_sllw_3(<8 x i16> %InVec) {
 entry:
-%shl = shl <8 x i16> %InVec, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+%shl = shl <8 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
 ret <8 x i16> %shl
 }
 
 ; CHECK-LABEL: test_sllw_3:
-; CHECK: xorps %xmm0, %xmm0
+; CHECK: psllw $15, %xmm0
 ; CHECK-NEXT: ret
 
 define <4 x i32> @test_slld_1(<4 x i32> %InVec) {
@@ -54,12 +54,12 @@ entry:
 define <4 x i32> @test_slld_3(<4 x i32> %InVec) {
 entry:
-%shl = shl <4 x i32> %InVec, <i32 32, i32 32, i32 32, i32 32>
+%shl = shl <4 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31>
 ret <4 x i32> %shl
 }
 
 ; CHECK-LABEL: test_slld_3:
-; CHECK: xorps %xmm0, %xmm0
+; CHECK: pslld $31, %xmm0
 ; CHECK-NEXT: ret
 
 define <2 x i64> @test_sllq_1(<2 x i64> %InVec) {
@@ -84,12 +84,12 @@ entry:
 define <2 x i64> @test_sllq_3(<2 x i64> %InVec) {
 entry:
-%shl = shl <2 x i64> %InVec, <i64 64, i64 64>
+%shl = shl <2 x i64> %InVec, <i64 63, i64 63>
 ret <2 x i64> %shl
 }
 
 ; CHECK-LABEL: test_sllq_3:
-; CHECK: xorps %xmm0, %xmm0
+; CHECK: psllq $63, %xmm0
 ; CHECK-NEXT: ret
 
 ; SSE2 Arithmetic Shift
@@ -116,7 +116,7 @@ entry:
 define <8 x i16> @test_sraw_3(<8 x i16> %InVec) {
 entry:
-%shl = ashr <8 x i16> %InVec, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+%shl = ashr <8 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
 ret <8 x i16> %shl
 }
@@ -146,7 +146,7 @@ entry:
 define <4 x i32> @test_srad_3(<4 x i32> %InVec) {
 entry:
-%shl = ashr <4 x i32> %InVec, <i32 32, i32 32, i32 32, i32 32>
+%shl = ashr <4 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31>
 ret <4 x i32> %shl
 }
@@ -178,12 +178,12 @@ entry:
 define <8 x i16> @test_srlw_3(<8 x i16> %InVec) {
 entry:
-%shl = lshr <8 x i16> %InVec, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
+%shl = lshr <8 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
 ret <8 x i16> %shl
 }
 
 ; CHECK-LABEL: test_srlw_3:
-; CHECK: xorps %xmm0, %xmm0
+; CHECK: psrlw $15, %xmm0
 ; CHECK-NEXT: ret
 
 define <4 x i32> @test_srld_1(<4 x i32> %InVec) {
@@ -208,12 +208,12 @@ entry:
 define <4 x i32> @test_srld_3(<4 x i32> %InVec) {
 entry:
-%shl = lshr <4 x i32> %InVec, <i32 32, i32 32, i32 32, i32 32>
+%shl = lshr <4 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31>
 ret <4 x i32> %shl
 }
 
 ; CHECK-LABEL: test_srld_3:
-; CHECK: xorps %xmm0, %xmm0
+; CHECK: psrld $31, %xmm0
 ; CHECK-NEXT: ret
 
 define <2 x i64> @test_srlq_1(<2 x i64> %InVec) {
@@ -238,10 +238,10 @@ entry:
 define <2 x i64> @test_srlq_3(<2 x i64> %InVec) {
 entry:
-%shl = lshr <2 x i64> %InVec, <i64 64, i64 64>
+%shl = lshr <2 x i64> %InVec, <i64 63, i64 63>
 ret <2 x i64> %shl
 }
 
 ; CHECK-LABEL: test_srlq_3:
-; CHECK: xorps %xmm0, %xmm0
+; CHECK: psrlq $63, %xmm0
 ; CHECK-NEXT: ret
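How these checks are exercised: each touched test file is compiled with llc and the assembly is matched against its ; CHECK lines by FileCheck. The RUN lines themselves sit at the top of each file and are outside the hunks above; the line below is only a hypothetical example of the pattern, not the actual RUN line of any of these files.

; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
; FileCheck scans llc's output for each ; CHECK / ; CHECK-LABEL directive in order,
; e.g. expecting a vshr.u32 instruction for the lshr-by-31 tests above.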