Enable MachineVerifier in debug mode for X86, ARM, AArch64, Mips.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@224075 91177308-0d34-0410-b5e6-96231b3b80d8
Matthias Braun 2014-12-11 23:18:03 +00:00
parent 11fcb48306
commit 8ac056b9dd
4 changed files with 20 additions and 20 deletions
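
Note (not part of the commit; illustration only): each hunk below simply drops an explicit `false` passed as the optional boolean after the pass in `addPass(...)`, so the MachineVerifier can again be scheduled after those passes whenever machine verification is active (for example via llc's -verify-machineinstrs option). The following is a self-contained toy model of that pattern with hypothetical names (PassConfig, addPass, run), not LLVM's real classes:

// Toy model (not LLVM code) of the pattern changed below: an addPass() that
// schedules a verification step after each pass unless the caller opts out
// with verifyAfter=false.  All names here are hypothetical.
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct PassConfig {
  std::vector<std::function<void()>> Pipeline;

  void addPass(const std::string &Name, bool verifyAfter = true) {
    Pipeline.push_back([Name] { std::cout << "running " << Name << "\n"; });
    if (verifyAfter) // removing an explicit `false` re-enables this step
      Pipeline.push_back(
          [Name] { std::cout << "verifying after " << Name << "\n"; });
  }

  void run() {
    for (const auto &Step : Pipeline)
      Step();
  }
};

int main() {
  PassConfig PC;
  PC.addPass("expand-pseudos");             // new behavior: verified afterwards
  PC.addPass("branch-relaxation", false);   // old behavior: verification skipped
  PC.run();
}

In this analogy, every `-`/`+` pair in the hunks below moves a pass from the second call style to the first, so verification is no longer suppressed for it.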

lib/Target/AArch64/AArch64TargetMachine.cpp

@@ -270,7 +270,7 @@ bool AArch64PassConfig::addILPOpts() {
 void AArch64PassConfig::addPreRegAlloc() {
   // Use AdvSIMD scalar instructions whenever profitable.
   if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
-    addPass(createAArch64AdvSIMDScalar(), false);
+    addPass(createAArch64AdvSIMDScalar());
     // The AdvSIMD pass may produce copies that can be rewritten to
     // be register coaleascer friendly.
     addPass(&PeepholeOptimizerID);
@@ -280,7 +280,7 @@ void AArch64PassConfig::addPreRegAlloc() {
 void AArch64PassConfig::addPostRegAlloc() {
   // Change dead register definitions to refer to the zero register.
   if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
-    addPass(createAArch64DeadRegisterDefinitions(), false);
+    addPass(createAArch64DeadRegisterDefinitions());
   if (TM->getOptLevel() != CodeGenOpt::None &&
       (TM->getSubtarget<AArch64Subtarget>().isCortexA53() ||
        TM->getSubtarget<AArch64Subtarget>().isCortexA57()) &&
@@ -291,7 +291,7 @@ void AArch64PassConfig::addPostRegAlloc() {
 void AArch64PassConfig::addPreSched2() {
   // Expand some pseudo instructions to allow proper scheduling.
-  addPass(createAArch64ExpandPseudoPass(), false);
+  addPass(createAArch64ExpandPseudoPass());
   // Use load/store pair instructions when possible.
   if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt)
     addPass(createAArch64LoadStoreOptimizationPass());
@@ -299,10 +299,10 @@ void AArch64PassConfig::addPreSched2() {
 void AArch64PassConfig::addPreEmitPass() {
   if (EnableA53Fix835769)
-    addPass(createAArch64A53Fix835769(), false);
+    addPass(createAArch64A53Fix835769());
   // Relax conditional branch instructions if they're otherwise out of
   // range of their destination.
-  addPass(createAArch64BranchRelaxation(), false);
+  addPass(createAArch64BranchRelaxation());
   if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
       TM->getSubtarget<AArch64Subtarget>().isTargetMachO())
     addPass(createAArch64CollectLOHPass());

lib/Target/ARM/ARMTargetMachine.cpp

@@ -243,9 +243,9 @@ bool ARMPassConfig::addInstSelector() {
 void ARMPassConfig::addPreRegAlloc() {
   if (getOptLevel() != CodeGenOpt::None)
-    addPass(createARMLoadStoreOptimizationPass(true), false);
+    addPass(createARMLoadStoreOptimizationPass(true));
   if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA9())
-    addPass(createMLxExpansionPass(), false);
+    addPass(createMLxExpansionPass());
   // Since the A15SDOptimizer pass can insert VDUP instructions, it can only be
   // enabled when NEON is available.
   if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA15() &&
@@ -256,23 +256,23 @@ void ARMPassConfig::addPreRegAlloc() {
 void ARMPassConfig::addPreSched2() {
   if (getOptLevel() != CodeGenOpt::None) {
-    addPass(createARMLoadStoreOptimizationPass(), false);
+    addPass(createARMLoadStoreOptimizationPass());
     if (getARMSubtarget().hasNEON())
-      addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass), false);
+      addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass));
   }
   // Expand some pseudo instructions into multiple instructions to allow
   // proper scheduling.
-  addPass(createARMExpandPseudoPass(), false);
+  addPass(createARMExpandPseudoPass());
   if (getOptLevel() != CodeGenOpt::None) {
     if (!getARMSubtarget().isThumb1Only()) {
       // in v8, IfConversion depends on Thumb instruction widths
       if (getARMSubtarget().restrictIT() &&
           !getARMSubtarget().prefers32BitThumb())
-        addPass(createThumb2SizeReductionPass(), false);
-      addPass(&IfConverterID, false);
+        addPass(createThumb2SizeReductionPass());
+      addPass(&IfConverterID);
     }
   }
   if (getARMSubtarget().isThumb2())
@@ -282,12 +282,12 @@ void ARMPassConfig::addPreSched2() {
 void ARMPassConfig::addPreEmitPass() {
   if (getARMSubtarget().isThumb2()) {
     if (!getARMSubtarget().prefers32BitThumb())
-      addPass(createThumb2SizeReductionPass(), false);
+      addPass(createThumb2SizeReductionPass());
     // Constant island pass work on unbundled instructions.
-    addPass(&UnpackMachineBundlesID, false);
+    addPass(&UnpackMachineBundlesID);
   }
-  addPass(createARMOptimizeBarriersPass(), false);
+  addPass(createARMOptimizeBarriersPass());
   addPass(createARMConstantIslandPass());
 }

lib/Target/Mips/MipsTargetMachine.cpp

@@ -226,7 +226,7 @@ void MipsTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
 // print out the code after the passes.
 void MipsPassConfig::addPreEmitPass() {
   MipsTargetMachine &TM = getMipsTargetMachine();
-  addPass(createMipsDelaySlotFillerPass(TM), false);
-  addPass(createMipsLongBranchPass(TM), false);
+  addPass(createMipsDelaySlotFillerPass(TM));
+  addPass(createMipsLongBranchPass(TM));
   addPass(createMipsConstantIslandPass(TM));
 }

lib/Target/X86/X86TargetMachine.cpp

@@ -193,13 +193,13 @@ void X86PassConfig::addPostRegAlloc() {
 void X86PassConfig::addPreEmitPass() {
   if (getOptLevel() != CodeGenOpt::None && getX86Subtarget().hasSSE2())
-    addPass(createExecutionDependencyFixPass(&X86::VR128RegClass), false);
+    addPass(createExecutionDependencyFixPass(&X86::VR128RegClass));
   if (UseVZeroUpper)
-    addPass(createX86IssueVZeroUpperPass(), false);
+    addPass(createX86IssueVZeroUpperPass());
   if (getOptLevel() != CodeGenOpt::None) {
-    addPass(createX86PadShortFunctions(), false);
+    addPass(createX86PadShortFunctions());
     addPass(createX86FixupLEAs());
   }
 }