; RUN: opt < %s -instcombine -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0.0"

; Bitcasts between vectors and scalars are valid.
; PR4487
; xor of two identical bitcasts of %a is zero, so the whole body folds to 0.
define i32 @test1(i64 %a) {
  %t1 = bitcast i64 %a to <2 x i32>
  %t2 = bitcast i64 %a to <2 x i32>
  %t3 = xor <2 x i32> %t1, %t2
  %t4 = extractelement <2 x i32> %t3, i32 0
  ret i32 %t4
; CHECK-LABEL: @test1(
; CHECK: ret i32 0
}
; Optimize bitcasts that are extracting low element of vector. This happens
; because of SRoA.
; rdar://7892780
define float @test2(<2 x float> %A, <2 x i32> %B) {
  %tmp28 = bitcast <2 x float> %A to i64          ; <i64> [#uses=2]
  %tmp23 = trunc i64 %tmp28 to i32                ; <i32> [#uses=1]
  %tmp24 = bitcast i32 %tmp23 to float            ; <float> [#uses=1]

  %tmp = bitcast <2 x i32> %B to i64
  %tmp2 = trunc i64 %tmp to i32                   ; <i32> [#uses=1]
  %tmp4 = bitcast i32 %tmp2 to float              ; <float> [#uses=1]

  %add = fadd float %tmp24, %tmp4
  ret float %add
; CHECK-LABEL: @test2(
; CHECK-NEXT: %tmp24 = extractelement <2 x float> %A, i32 0
; CHECK-NEXT: bitcast <2 x i32> %B to <2 x float>
; CHECK-NEXT: %tmp4 = extractelement <2 x float> {{.*}}, i32 0
; CHECK-NEXT: %add = fadd float %tmp24, %tmp4
; CHECK-NEXT: ret float %add
}
; Optimize bitcasts that are extracting other elements of a vector. This
; happens because of SRoA.
; rdar://7892780
define float @test3(<2 x float> %A, <2 x i64> %B) {
  %tmp28 = bitcast <2 x float> %A to i64
  %tmp29 = lshr i64 %tmp28, 32
  %tmp23 = trunc i64 %tmp29 to i32
  %tmp24 = bitcast i32 %tmp23 to float

  %tmp = bitcast <2 x i64> %B to i128
  %tmp1 = lshr i128 %tmp, 64
  %tmp2 = trunc i128 %tmp1 to i32
  %tmp4 = bitcast i32 %tmp2 to float

  %add = fadd float %tmp24, %tmp4
  ret float %add
; CHECK-LABEL: @test3(
; CHECK-NEXT: %tmp24 = extractelement <2 x float> %A, i32 1
; CHECK-NEXT: bitcast <2 x i64> %B to <4 x float>
; CHECK-NEXT: %tmp4 = extractelement <4 x float> {{.*}}, i32 2
; CHECK-NEXT: %add = fadd float %tmp24, %tmp4
; CHECK-NEXT: ret float %add
}
; Bitcast from a large integer assembled out of zext/shl/or pieces becomes
; vector element insertion of the pieces.
define <2 x i32> @test4(i32 %A, i32 %B){
  %tmp38 = zext i32 %A to i64
  %tmp32 = zext i32 %B to i64
  %tmp33 = shl i64 %tmp32, 32
  %ins35 = or i64 %tmp33, %tmp38
  %tmp43 = bitcast i64 %ins35 to <2 x i32>
  ret <2 x i32> %tmp43
; CHECK-LABEL: @test4(
; CHECK-NEXT: insertelement <2 x i32> undef, i32 %A, i32 0
; CHECK-NEXT: insertelement <2 x i32> {{.*}}, i32 %B, i32 1
; CHECK-NEXT: ret <2 x i32>
}
; rdar://8360454
; Same as test4, but the pieces start as floats bitcast to i32.
define <2 x float> @test5(float %A, float %B) {
  %tmp37 = bitcast float %A to i32
  %tmp38 = zext i32 %tmp37 to i64
  %tmp31 = bitcast float %B to i32
  %tmp32 = zext i32 %tmp31 to i64
  %tmp33 = shl i64 %tmp32, 32
  %ins35 = or i64 %tmp33, %tmp38
  %tmp43 = bitcast i64 %ins35 to <2 x float>
  ret <2 x float> %tmp43
; CHECK-LABEL: @test5(
; CHECK-NEXT: insertelement <2 x float> undef, float %A, i32 0
; CHECK-NEXT: insertelement <2 x float> {{.*}}, float %B, i32 1
; CHECK-NEXT: ret <2 x float>
}
; Constant case of vector insertion: the or'd-in constant 1109917696
; (0x42280000) is the float 42.0 in the low element.
define <2 x float> @test6(float %A){
  %tmp23 = bitcast float %A to i32              ; <i32> [#uses=1]
  %tmp24 = zext i32 %tmp23 to i64               ; <i64> [#uses=1]
  %tmp25 = shl i64 %tmp24, 32                   ; <i64> [#uses=1]
  %mask20 = or i64 %tmp25, 1109917696           ; <i64> [#uses=1]
  %tmp35 = bitcast i64 %mask20 to <2 x float>   ; <<2 x float>> [#uses=1]
  ret <2 x float> %tmp35
; CHECK-LABEL: @test6(
; CHECK-NEXT: insertelement <2 x float> <float 4.200000e+01, float undef>, float %A, i32 1
; CHECK: ret
}
; and with xor(bitcast(all-ones vector), -1) == and with 0 -> folds to 0.
define i64 @ISPC0(i64 %in) {
  %out = and i64 %in, xor (i64 bitcast (<4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1> to i64), i64 -1)
  ret i64 %out
; CHECK-LABEL: @ISPC0(
; CHECK: ret i64 0
}
; and with xor(bitcast(zero vector), 0) == and with 0 -> folds to 0.
define i64 @Vec2(i64 %in) {
  %out = and i64 %in, xor (i64 bitcast (<4 x i16> <i16 0, i16 0, i16 0, i16 0> to i64), i64 0)
  ret i64 %out
; CHECK-LABEL: @Vec2(
; CHECK: ret i64 0
}
; Round-trip bitcast of -1 through <2 x float> is still all-ones; xor with -1
; gives 0, so the and folds to 0.
define i64 @All11(i64 %in) {
  %out = and i64 %in, xor (i64 bitcast (<2 x float> bitcast (i64 -1 to <2 x float>) to i64), i64 -1)
  ret i64 %out
; CHECK-LABEL: @All11(
; CHECK: ret i64 0
}
; Same as @All11 but through a single-element float vector at i32 width.
define i32 @All111(i32 %in) {
  %out = and i32 %in, xor (i32 bitcast (<1 x float> bitcast (i32 -1 to <1 x float>) to i32), i32 -1)
  ret i32 %out
; CHECK-LABEL: @All111(
; CHECK: ret i32 0
}
; insertelement into a <1 x i32> followed by a bitcast collapses to a direct
; scalar-to-vector bitcast.
define <2 x i16> @BitcastInsert(i32 %a) {
  %v = insertelement <1 x i32> undef, i32 %a, i32 0
  %r = bitcast <1 x i32> %v to <2 x i16>
  ret <2 x i16> %r
; CHECK-LABEL: @BitcastInsert(
; CHECK: bitcast i32 %a to <2 x i16>
}
; PR17293
; Pointer-vector load through a bitcast pointer must not be miscompiled; the
; bitcast and load are expected to survive.
define <2 x i64> @test7(<2 x i8*>* %arg) nounwind {
  %cast = bitcast <2 x i8*>* %arg to <2 x i64>*
  %load = load <2 x i64>* %cast, align 16
  ret <2 x i64> %load
; CHECK-LABEL: @test7(
; CHECK: bitcast
; CHECK: load
}
|