Introduce 'UseSSEx' to force SSE legacy encoding

- Add 'UseSSEx' to prevent SSE legacy instructions from being selected when
  AVX is enabled.

  Because of the penalty incurred by inter-mixing SSE and AVX instructions,
  we need to prevent SSE legacy instructions from being generated except
  when explicitly requested through certain intrinsics. For patterns
  supported by both SSE and AVX, we have so far forced the AVX instruction
  to be tried first by relying on AddedComplexity or on position in the .td
  file. That approach is error-prone and has accidentally introduced bugs.

  'UseSSEx' is false when AVX is turned on. For SSE instructions inherited
  by AVX, this predicate ensures that exactly one of the VEX encoding or
  the SSE legacy encoding can be selected (see the sketch after the list
  below).

  Instructions not inherited by AVX keep the previous predicates, i.e.
  'HasSSEx'. So far, these instructions fall into the following categories:
  * SSE instructions with MMX operands
  * SSE instructions with only GPR/memory operands (xFENCE, PREFETCH,
    CLFLUSH, CRC, etc.)
  * SSE4A instructions
  * MMX instructions
  * x87 instructions added by SSE
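
  As an illustration, here is a minimal sketch of the two predicate forms
  in TableGen; the shape follows the description above, but the exact
  subtarget accessor names are assumptions, not quoted from the patch:

    // 'Has' form: true whenever the feature bit is set, even under AVX.
    // Retained for the instruction categories listed above.
    def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;

    // 'Use' form: additionally requires AVX to be off, so the SSE legacy
    // encoding cannot be selected once the VEX form is available.
    def UseSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;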

- Two test cases are modified:

  - test/CodeGen/X86/fast-isel-x86-64.ll
    AVX code generation differs from the SSE one: 'vcvtsi2sdq' cannot be
    selected by fast-isel due to its complicated pattern, so fast-isel
    falls back to materializing the value from the constant pool.

  - test/CodeGen/X86/widen_load-1.ll
    AVX code generation differs from the SSE one after the SSE/AVX
    inter-mixing fix: execution-domain fixing prefers 'vmovapd' over
    'vmovaps'.
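
  For completeness, a hedged sketch of how a legacy pattern can now be
  gated on the new predicate instead of relying on AddedComplexity
  ordering. The instruction and pattern names below are illustrative
  (in-tree patterns are mostly produced by multiclasses), not quoted from
  the patch:

    // Legacy pattern: can only fire when AVX is off.
    let Predicates = [UseSSE2] in
      def : Pat<(v2f64 (fadd VR128:$src1, VR128:$src2)),
                (ADDPDrr VR128:$src1, VR128:$src2)>;

    // VEX-encoded pattern: guarded by HasAVX, so the two never compete.
    let Predicates = [HasAVX] in
      def : Pat<(v2f64 (fadd VR128:$src1, VR128:$src2)),
                (VADDPDrr VR128:$src1, VR128:$src2)>;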
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@162919 91177308-0d34-0410-b5e6-96231b3b80d8
; RUN: llc %s -o - -march=x86-64 -mattr=-avx -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=SSE
; RUN: llc %s -o - -march=x86-64 -mattr=+avx -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=AVX
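; Both RUN lines compile the same IR below: the SSE run (-mattr=-avx) must
; use the legacy 'movaps' encoding, while the AVX run (+avx) must use the
; VEX-encoded form.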
; PR4891
; PR5626
; This load should be before the call, not after.
; SSE: movaps compl+128(%rip), %xmm0
; SSE: movaps %xmm0, (%rsp)
; SSE: callq killcommon
; AVX: vmovapd compl+128(%rip), %xmm0
; AVX: vmovapd %xmm0, (%rsp)
; AVX: callq killcommon
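; With AVX enabled, execution-domain fixing prefers 'vmovapd' over
; 'vmovaps' here (see the commit message above).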
@compl = linkonce global [20 x i64] zeroinitializer, align 64 ; <[20 x i64]*> [#uses=1]
declare void @killcommon(i32* noalias)
define void @reset(<2 x float>* noalias %garbage1) {
"file complex.c, line 27, bb1":
  %changed = alloca i32, align 4 ; <i32*> [#uses=3]
  br label %"file complex.c, line 27, bb13"
"file complex.c, line 27, bb13": ; preds = %"file complex.c, line 27, bb1"
  store i32 0, i32* %changed, align 4
  %r2 = getelementptr float* bitcast ([20 x i64]* @compl to float*), i64 32 ; <float*> [#uses=1]
  %r3 = bitcast float* %r2 to <2 x float>* ; <<2 x float>*> [#uses=1]
  %r4 = load <2 x float>* %r3, align 4 ; <<2 x float>> [#uses=1]
  call void @killcommon(i32* %changed)
  br label %"file complex.c, line 34, bb4"
"file complex.c, line 34, bb4": ; preds = %"file complex.c, line 27, bb13"
  %r5 = load i32* %changed, align 4 ; <i32> [#uses=1]
  %r6 = icmp eq i32 %r5, 0 ; <i1> [#uses=1]
  %r7 = zext i1 %r6 to i32 ; <i32> [#uses=1]
  %r8 = icmp ne i32 %r7, 0 ; <i1> [#uses=1]
  br i1 %r8, label %"file complex.c, line 34, bb7", label %"file complex.c, line 27, bb5"
"file complex.c, line 27, bb5": ; preds = %"file complex.c, line 34, bb4"
  br label %"file complex.c, line 35, bb6"
"file complex.c, line 35, bb6": ; preds = %"file complex.c, line 27, bb5"
  %r11 = ptrtoint <2 x float>* %garbage1 to i64 ; <i64> [#uses=1]
  %r12 = inttoptr i64 %r11 to <2 x float>* ; <<2 x float>*> [#uses=1]
  store <2 x float> %r4, <2 x float>* %r12, align 4
  br label %"file complex.c, line 34, bb7"
"file complex.c, line 34, bb7": ; preds = %"file complex.c, line 35, bb6", %"file complex.c, line 34, bb4"
  ret void
}