mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2024-12-26 05:32:25 +00:00
38c6b58eec
Summary: AsmPrinter::EmitInlineAsm() will no longer use the EmitRawText() call for targets with mature MC support. Such targets will always parse the inline assembly (even when emitting assembly). Targets without mature MC support continue to use EmitRawText() for assembly output. The hasRawTextSupport() check in AsmPrinter::EmitInlineAsm() has been replaced with MCAsmInfo::UseIntegratedAs which when true, causes the integrated assembler to parse inline assembly (even when emitting assembly output). UseIntegratedAs is set to true for targets that consider any failure to parse valid assembly to be a bug. Target specific subclasses generally enable the integrated assembler in their constructor. The default value can be overridden with -no-integrated-as. All tests that rely on inline assembly supporting invalid assembly (for example, those that use mnemonics such as 'foo' or 'hello world') have been updated to disable the integrated assembler. Changes since review (and last commit attempt): - Fixed test failures that were missed due to configuration of local build. (fixes crash.ll and a couple others). - Fixed tests that happened to pass because the local build was on X86 (should fix 2007-12-17-InvokeAsm.ll) - mature-mc-support.ll's should no longer require all targets to be compiled. (should fix ARM and PPC buildbots) - Object output (-filetype=obj and similar) now forces the integrated assembler to be enabled regardless of default setting or -no-integrated-as. (should fix SystemZ buildbots) Reviewers: rafael Reviewed By: rafael CC: llvm-commits Differential Revision: http://llvm-reviews.chandlerc.com/D2686 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@201333 91177308-0d34-0410-b5e6-96231b3b80d8
53 lines
2.3 KiB
LLVM
53 lines
2.3 KiB
LLVM
; RUN: llc < %s -relocation-model=pic -disable-fp-elim -mcpu=cortex-a8 -pre-RA-sched=source -no-integrated-as | FileCheck %s

target triple = "thumbv7-apple-ios"

; <rdar://problem/10032939>
;
; The vector %v2 is built like this:
;
; %vreg6:ssub_1<def> = ...
; %vreg6:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg; mem:LD4[ConstantPool] DPR_VFP2:%vreg6
;
; When %vreg6 spills, the VLDRS constant pool load cannot be rematerialized
; since it implicitly reads the ssub_1 sub-register.
;
; CHECK: f1
; CHECK: vmov d0, r0, r0
; CHECK: vldr s1, LCPI
; The vector must be spilled:
; CHECK: vstr d0,
; CHECK: asm clobber d0
; And reloaded after the asm:
; CHECK: vldr [[D16:d[0-9]+]],
; CHECK: vstr [[D16]], [r1]
; Build a <2 x float> whose lane 0 comes from the argument and lane 1 from a
; constant-pool load, then clobber every d-register except d0 so the value
; must be spilled across the inline asm and reloaded afterwards.
define void @f1(float %x, <2 x float>* %p) {
  %v1 = insertelement <2 x float> undef, float %x, i32 0
  %v2 = insertelement <2 x float> %v1, float 0x400921FB60000000, i32 1
  %y = call double asm sideeffect "asm clobber $0", "=w,0,~{d1},~{d2},~{d3},~{d4},~{d5},~{d6},~{d7},~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15},~{d16},~{d17},~{d18},~{d19},~{d20},~{d21},~{d22},~{d23},~{d24},~{d25},~{d26},~{d27},~{d28},~{d29},~{d30},~{d31}"(<2 x float> %v2) nounwind
  store <2 x float> %v2, <2 x float>* %p, align 8
  ret void
}
; On the other hand, when the partial redef doesn't read the full register
; because the bits are undef, we should rematerialize. The vector is now built
; like this:
;
; %vreg2:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg, %vreg2<imp-def>; mem:LD4[ConstantPool]
;
; The extra <imp-def> operand indicates that the instruction fully defines the
; virtual register. It doesn't read the old value.
;
; CHECK: f2
; CHECK: vldr s0, LCPI
; The vector must not be spilled:
; CHECK-NOT: vstr
; CHECK: asm clobber d0
; But instead rematerialize after the asm:
; CHECK: vldr [[S0:s[0-9]+]], LCPI
; CHECK: vstr [[D0:d[0-9]+]], [r0]
; Same shape as @f1, but lane 1 of the vector is left undef, so the
; constant-pool load fully defines the register and can be rematerialized
; after the inline asm instead of being spilled.
define void @f2(<2 x float>* %p) {
  %v2 = insertelement <2 x float> undef, float 0x400921FB60000000, i32 0
  %y = call double asm sideeffect "asm clobber $0", "=w,0,~{d1},~{d2},~{d3},~{d4},~{d5},~{d6},~{d7},~{d8},~{d9},~{d10},~{d11},~{d12},~{d13},~{d14},~{d15},~{d16},~{d17},~{d18},~{d19},~{d20},~{d21},~{d22},~{d23},~{d24},~{d25},~{d26},~{d27},~{d28},~{d29},~{d30},~{d31}"(<2 x float> %v2) nounwind
  store <2 x float> %v2, <2 x float>* %p, align 8
  ret void
}