Enable the machine verifier in debug mode for X86, ARM, AArch64, Mips

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@224043 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Matthias Braun
2014-12-11 19:42:09 +00:00
parent 71f56c4aac
commit e9256e340b
4 changed files with 20 additions and 20 deletions

View File

@@ -270,7 +270,7 @@ bool AArch64PassConfig::addILPOpts() {
void AArch64PassConfig::addPreRegAlloc() { void AArch64PassConfig::addPreRegAlloc() {
// Use AdvSIMD scalar instructions whenever profitable. // Use AdvSIMD scalar instructions whenever profitable.
if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) { if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
addPass(createAArch64AdvSIMDScalar(), false); addPass(createAArch64AdvSIMDScalar());
// The AdvSIMD pass may produce copies that can be rewritten to // The AdvSIMD pass may produce copies that can be rewritten to
// be register coaleascer friendly. // be register coaleascer friendly.
addPass(&PeepholeOptimizerID); addPass(&PeepholeOptimizerID);
@@ -280,7 +280,7 @@ void AArch64PassConfig::addPreRegAlloc() {
void AArch64PassConfig::addPostRegAlloc() { void AArch64PassConfig::addPostRegAlloc() {
// Change dead register definitions to refer to the zero register. // Change dead register definitions to refer to the zero register.
if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination) if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
addPass(createAArch64DeadRegisterDefinitions(), false); addPass(createAArch64DeadRegisterDefinitions());
if (TM->getOptLevel() != CodeGenOpt::None && if (TM->getOptLevel() != CodeGenOpt::None &&
(TM->getSubtarget<AArch64Subtarget>().isCortexA53() || (TM->getSubtarget<AArch64Subtarget>().isCortexA53() ||
TM->getSubtarget<AArch64Subtarget>().isCortexA57()) && TM->getSubtarget<AArch64Subtarget>().isCortexA57()) &&
@@ -291,7 +291,7 @@ void AArch64PassConfig::addPostRegAlloc() {
void AArch64PassConfig::addPreSched2() { void AArch64PassConfig::addPreSched2() {
// Expand some pseudo instructions to allow proper scheduling. // Expand some pseudo instructions to allow proper scheduling.
addPass(createAArch64ExpandPseudoPass(), false); addPass(createAArch64ExpandPseudoPass());
// Use load/store pair instructions when possible. // Use load/store pair instructions when possible.
if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt) if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt)
addPass(createAArch64LoadStoreOptimizationPass()); addPass(createAArch64LoadStoreOptimizationPass());
@@ -299,10 +299,10 @@ void AArch64PassConfig::addPreSched2() {
void AArch64PassConfig::addPreEmitPass() { void AArch64PassConfig::addPreEmitPass() {
if (EnableA53Fix835769) if (EnableA53Fix835769)
addPass(createAArch64A53Fix835769(), false); addPass(createAArch64A53Fix835769());
// Relax conditional branch instructions if they're otherwise out of // Relax conditional branch instructions if they're otherwise out of
// range of their destination. // range of their destination.
addPass(createAArch64BranchRelaxation(), false); addPass(createAArch64BranchRelaxation());
if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH && if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
TM->getSubtarget<AArch64Subtarget>().isTargetMachO()) TM->getSubtarget<AArch64Subtarget>().isTargetMachO())
addPass(createAArch64CollectLOHPass()); addPass(createAArch64CollectLOHPass());

View File

@@ -243,9 +243,9 @@ bool ARMPassConfig::addInstSelector() {
void ARMPassConfig::addPreRegAlloc() { void ARMPassConfig::addPreRegAlloc() {
if (getOptLevel() != CodeGenOpt::None) if (getOptLevel() != CodeGenOpt::None)
addPass(createARMLoadStoreOptimizationPass(true), false); addPass(createARMLoadStoreOptimizationPass(true));
if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA9()) if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA9())
addPass(createMLxExpansionPass(), false); addPass(createMLxExpansionPass());
// Since the A15SDOptimizer pass can insert VDUP instructions, it can only be // Since the A15SDOptimizer pass can insert VDUP instructions, it can only be
// enabled when NEON is available. // enabled when NEON is available.
if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA15() && if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA15() &&
@@ -256,23 +256,23 @@ void ARMPassConfig::addPreRegAlloc() {
void ARMPassConfig::addPreSched2() { void ARMPassConfig::addPreSched2() {
if (getOptLevel() != CodeGenOpt::None) { if (getOptLevel() != CodeGenOpt::None) {
addPass(createARMLoadStoreOptimizationPass(), false); addPass(createARMLoadStoreOptimizationPass());
if (getARMSubtarget().hasNEON()) if (getARMSubtarget().hasNEON())
addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass), false); addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass));
} }
// Expand some pseudo instructions into multiple instructions to allow // Expand some pseudo instructions into multiple instructions to allow
// proper scheduling. // proper scheduling.
addPass(createARMExpandPseudoPass(), false); addPass(createARMExpandPseudoPass());
if (getOptLevel() != CodeGenOpt::None) { if (getOptLevel() != CodeGenOpt::None) {
if (!getARMSubtarget().isThumb1Only()) { if (!getARMSubtarget().isThumb1Only()) {
// in v8, IfConversion depends on Thumb instruction widths // in v8, IfConversion depends on Thumb instruction widths
if (getARMSubtarget().restrictIT() && if (getARMSubtarget().restrictIT() &&
!getARMSubtarget().prefers32BitThumb()) !getARMSubtarget().prefers32BitThumb())
addPass(createThumb2SizeReductionPass(), false); addPass(createThumb2SizeReductionPass());
addPass(&IfConverterID, false); addPass(&IfConverterID);
} }
} }
if (getARMSubtarget().isThumb2()) if (getARMSubtarget().isThumb2())
@@ -282,12 +282,12 @@ void ARMPassConfig::addPreSched2() {
void ARMPassConfig::addPreEmitPass() { void ARMPassConfig::addPreEmitPass() {
if (getARMSubtarget().isThumb2()) { if (getARMSubtarget().isThumb2()) {
if (!getARMSubtarget().prefers32BitThumb()) if (!getARMSubtarget().prefers32BitThumb())
addPass(createThumb2SizeReductionPass(), false); addPass(createThumb2SizeReductionPass());
// Constant island pass work on unbundled instructions. // Constant island pass work on unbundled instructions.
addPass(&UnpackMachineBundlesID, false); addPass(&UnpackMachineBundlesID);
} }
addPass(createARMOptimizeBarriersPass(), false); addPass(createARMOptimizeBarriersPass());
addPass(createARMConstantIslandPass()); addPass(createARMConstantIslandPass());
} }

View File

@@ -226,7 +226,7 @@ void MipsTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
// print out the code after the passes. // print out the code after the passes.
void MipsPassConfig::addPreEmitPass() { void MipsPassConfig::addPreEmitPass() {
MipsTargetMachine &TM = getMipsTargetMachine(); MipsTargetMachine &TM = getMipsTargetMachine();
addPass(createMipsDelaySlotFillerPass(TM), false); addPass(createMipsDelaySlotFillerPass(TM));
addPass(createMipsLongBranchPass(TM), false); addPass(createMipsLongBranchPass(TM));
addPass(createMipsConstantIslandPass(TM)); addPass(createMipsConstantIslandPass(TM));
} }

View File

@@ -193,13 +193,13 @@ void X86PassConfig::addPostRegAlloc() {
void X86PassConfig::addPreEmitPass() { void X86PassConfig::addPreEmitPass() {
if (getOptLevel() != CodeGenOpt::None && getX86Subtarget().hasSSE2()) if (getOptLevel() != CodeGenOpt::None && getX86Subtarget().hasSSE2())
addPass(createExecutionDependencyFixPass(&X86::VR128RegClass), false); addPass(createExecutionDependencyFixPass(&X86::VR128RegClass));
if (UseVZeroUpper) if (UseVZeroUpper)
addPass(createX86IssueVZeroUpperPass(), false); addPass(createX86IssueVZeroUpperPass());
if (getOptLevel() != CodeGenOpt::None) { if (getOptLevel() != CodeGenOpt::None) {
addPass(createX86PadShortFunctions(), false); addPass(createX86PadShortFunctions());
addPass(createX86FixupLEAs()); addPass(createX86FixupLEAs());
} }
} }