diff --git a/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp b/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
index 447ab9de95b..543af8e4e77 100644
--- a/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
@@ -656,7 +656,7 @@ void X86AddressSanitizer32::InstrumentMemOperandSmall(
       Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
   MCSymbol *DoneSym = Ctx.CreateTempSymbol();
   const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
-  EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr));
+  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

   EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
                            AddressRegI32));
@@ -690,7 +690,7 @@ void X86AddressSanitizer32::InstrumentMemOperandSmall(
       MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
   EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
                            ShadowRegI32));
-  EmitInstruction(Out, MCInstBuilder(X86::JL_4).addExpr(DoneExpr));
+  EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));

   EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
   EmitLabel(Out, DoneSym);
@@ -731,7 +731,7 @@ void X86AddressSanitizer32::InstrumentMemOperandLarge(
   }
   MCSymbol *DoneSym = Ctx.CreateTempSymbol();
   const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
-  EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr));
+  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

   EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
   EmitLabel(Out, DoneSym);
@@ -747,7 +747,7 @@ void X86AddressSanitizer32::InstrumentMOVSImpl(unsigned AccessSize,
   const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
   EmitInstruction(
       Out, MCInstBuilder(X86::TEST32rr).addReg(X86::ECX).addReg(X86::ECX));
-  EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr));
+  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

   // Instrument first and last elements in src and dst range.
   InstrumentMOVSBase(X86::EDI /* DstReg */, X86::ESI /* SrcReg */,
@@ -927,7 +927,7 @@ void X86AddressSanitizer64::InstrumentMemOperandSmall(
       Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
   MCSymbol *DoneSym = Ctx.CreateTempSymbol();
   const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
-  EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr));
+  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

   EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
                            AddressRegI32));
@@ -961,7 +961,7 @@ void X86AddressSanitizer64::InstrumentMemOperandSmall(
       MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
   EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
                            ShadowRegI32));
-  EmitInstruction(Out, MCInstBuilder(X86::JL_4).addExpr(DoneExpr));
+  EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));

   EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
   EmitLabel(Out, DoneSym);
@@ -1003,7 +1003,7 @@ void X86AddressSanitizer64::InstrumentMemOperandLarge(

   MCSymbol *DoneSym = Ctx.CreateTempSymbol();
   const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
-  EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr));
+  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

   EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
   EmitLabel(Out, DoneSym);
@@ -1019,7 +1019,7 @@ void X86AddressSanitizer64::InstrumentMOVSImpl(unsigned AccessSize,
   const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
   EmitInstruction(
       Out, MCInstBuilder(X86::TEST64rr).addReg(X86::RCX).addReg(X86::RCX));
-  EmitInstruction(Out, MCInstBuilder(X86::JE_4).addExpr(DoneExpr));
+  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

   // Instrument first and last elements in src and dst range.
   InstrumentMOVSBase(X86::RDI /* DstReg */, X86::RSI /* SrcReg */,
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index d4459eec8ba..7849801f765 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -1362,7 +1362,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {

     // X86 requires a second branch to handle UNE (and OEQ, which is mapped
     // to UNE above).
     if (NeedExtraBranch) {
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JP_4))
+      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JP_1))
         .addMBB(TrueMBB);
     }
@@ -1399,10 +1399,10 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TestOpc))
         .addReg(OpReg).addImm(1);

-      unsigned JmpOpc = X86::JNE_4;
+      unsigned JmpOpc = X86::JNE_1;
       if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
         std::swap(TrueMBB, FalseMBB);
-        JmpOpc = X86::JE_4;
+        JmpOpc = X86::JE_1;
       }

       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(JmpOpc))
@@ -1444,7 +1444,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {

   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
     .addReg(OpReg).addImm(1);
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JNE_4))
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JNE_1))
     .addMBB(TrueMBB);
   fastEmitBranch(FalseMBB, DbgLoc);
   uint32_t BranchWeight = 0;
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index 95bca7d936f..8779acc6a08 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -1617,7 +1617,7 @@ X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {

   // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
   // It jumps to normal execution of the function body.
-  BuildMI(checkMBB, DL, TII.get(X86::JA_4)).addMBB(&prologueMBB);
+  BuildMI(checkMBB, DL, TII.get(X86::JA_1)).addMBB(&prologueMBB);

   // On 32 bit we first push the arguments size and then the frame size. On 64
   // bit, we pass the stack frame size in r10 and the argument size in r11.
@@ -1821,7 +1821,7 @@ void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {
     // SPLimitOffset is in a fixed heap location (pointed by BP).
     addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
                  .addReg(ScratchReg), PReg, false, SPLimitOffset);
-    BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_4)).addMBB(&prologueMBB);
+    BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_1)).addMBB(&prologueMBB);

     // Create new MBB for IncStack:
     BuildMI(incStackMBB, DL, TII.get(CALLop)).
@@ -1830,7 +1830,7 @@ void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {
                  SPReg, false, -MaxStack);
     addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
                  .addReg(ScratchReg), PReg, false, SPLimitOffset);
-    BuildMI(incStackMBB, DL, TII.get(X86::JLE_4)).addMBB(incStackMBB);
+    BuildMI(incStackMBB, DL, TII.get(X86::JLE_1)).addMBB(incStackMBB);

     stackCheckMBB->addSuccessor(&prologueMBB, 99);
     stackCheckMBB->addSuccessor(incStackMBB, 1);
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index 7f177bda9ae..7fe8e8b344a 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -1891,8 +1891,8 @@ static bool HasNoSignedComparisonUses(SDNode *N) {
       case X86::SETEr:  case X86::SETNEr: case X86::SETPr:  case X86::SETNPr:
      case X86::SETAm:  case X86::SETAEm: case X86::SETBm:  case X86::SETBEm:
      case X86::SETEm:  case X86::SETNEm: case X86::SETPm:  case X86::SETNPm:
-      case X86::JA_4:   case X86::JAE_4:  case X86::JB_4:   case X86::JBE_4:
-      case X86::JE_4:   case X86::JNE_4:  case X86::JP_4:   case X86::JNP_4:
+      case X86::JA_1:   case X86::JAE_1:  case X86::JB_1:   case X86::JBE_1:
+      case X86::JE_1:   case X86::JNE_1:  case X86::JP_1:   case X86::JNP_1:
       case X86::CMOVA16rr:  case X86::CMOVA16rm:
       case X86::CMOVA32rr:  case X86::CMOVA32rm:
       case X86::CMOVA64rr:  case X86::CMOVA64rm:
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index e8bac7b9312..9e068616ddc 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -20527,7 +20527,7 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(
       .setMemRefs(MMOBegin, MMOEnd);

     // Jump to endMBB
-    BuildMI(offsetMBB, DL, TII->get(X86::JMP_4))
+    BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
       .addMBB(endMBB);
   }

@@ -20641,7 +20641,7 @@ X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
   if (!Subtarget->isTargetWin64()) {
     // If %al is 0, branch around the XMM save block.
     BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
-    BuildMI(MBB, DL, TII->get(X86::JE_4)).addMBB(EndMBB);
+    BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
     MBB->addSuccessor(EndMBB);
   }

@@ -20845,7 +20845,7 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
   BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
     .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
     .addReg(SPLimitVReg);
-  BuildMI(BB, DL, TII->get(X86::JG_4)).addMBB(mallocMBB);
+  BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);

   // bumpMBB simply decreases the stack pointer, since we know the current
   // stacklet has enough space.
@@ -20853,7 +20853,7 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
     .addReg(SPLimitVReg);
   BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
     .addReg(SPLimitVReg);
-  BuildMI(bumpMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB);
+  BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);

   // Calls into a routine in libgcc to allocate more space from the heap.
   const uint32_t *RegMask = MF->getTarget()
@@ -20892,7 +20892,7 @@ X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,

   BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
     .addReg(IsLP64 ? X86::RAX : X86::EAX);
-  BuildMI(mallocMBB, DL, TII->get(X86::JMP_4)).addMBB(continueMBB);
+  BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);

   // Set up the CFG correctly.
   BB->addSuccessor(bumpMBB);
@@ -21171,7 +21171,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
       .setMIFlag(MachineInstr::FrameSetup);
   }
   BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
-  BuildMI(restoreMBB, DL, TII->get(X86::JMP_4)).addMBB(sinkMBB);
+  BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
   restoreMBB->addSuccessor(sinkMBB);

   MI->eraseFromParent();
diff --git a/lib/Target/X86/X86InstrControl.td b/lib/Target/X86/X86InstrControl.td
index 80ff5e94b39..48c9897cd03 100644
--- a/lib/Target/X86/X86InstrControl.td
+++ b/lib/Target/X86/X86InstrControl.td
@@ -57,33 +57,32 @@ let isTerminator = 1, isReturn = 1, isBarrier = 1,

 // Unconditional branches.
 let isBarrier = 1, isBranch = 1, isTerminator = 1, SchedRW = [WriteJump] in {
-  def JMP_4 : Ii32PCRel<0xE9, RawFrm, (outs), (ins brtarget:$dst),
-                        "jmp\t$dst", [(br bb:$dst)], IIC_JMP_REL>, OpSize32;
-  def JMP_2 : Ii16PCRel<0xE9, RawFrm, (outs), (ins brtarget:$dst),
-                        "jmp\t$dst", [(br bb:$dst)], IIC_JMP_REL>, OpSize16,
-                        Requires<[In16BitMode]>;
-  let hasSideEffects = 0 in
   def JMP_1 : Ii8PCRel<0xEB, RawFrm, (outs), (ins brtarget8:$dst),
-                       "jmp\t$dst", [], IIC_JMP_REL>;
+                       "jmp\t$dst", [(br bb:$dst)], IIC_JMP_REL>;
+  let hasSideEffects = 0 in {
+    def JMP_2 : Ii16PCRel<0xE9, RawFrm, (outs), (ins brtarget:$dst),
+                          "jmp\t$dst", [], IIC_JMP_REL>, OpSize16;
+    def JMP_4 : Ii32PCRel<0xE9, RawFrm, (outs), (ins brtarget:$dst),
+                          "jmp\t$dst", [], IIC_JMP_REL>, OpSize32;
+  }
 }

 // Conditional Branches.
 let isBranch = 1, isTerminator = 1, Uses = [EFLAGS], SchedRW = [WriteJump] in {
   multiclass ICBr<bits<8> opc1, bits<8> opc4, string asm, PatFrag Cond> {
-    let hasSideEffects = 0 in
-    def _1 : Ii8PCRel <opc1, RawFrm, (outs), (ins brtarget8:$dst), asm, [],
-                       IIC_Jcc>;
-    def _2 : Ii16PCRel<opc4, RawFrm, (outs), (ins brtarget:$dst), asm,
-                       [(X86brcond bb:$dst, Cond, EFLAGS)], IIC_Jcc>, OpSize16,
-                       TB, Requires<[In16BitMode]>;
-    def _4 : Ii32PCRel<opc4, RawFrm, (outs), (ins brtarget:$dst), asm,
-                       [(X86brcond bb:$dst, Cond, EFLAGS)], IIC_Jcc>, TB,
-                       OpSize32;
+    def _1 : Ii8PCRel <opc1, RawFrm, (outs), (ins brtarget8:$dst), asm,
+                       [(X86brcond bb:$dst, Cond, EFLAGS)], IIC_Jcc>;
+    let hasSideEffects = 0 in {
+      def _2 : Ii16PCRel<opc4, RawFrm, (outs), (ins brtarget:$dst), asm, [],
+                         IIC_Jcc>, OpSize16, TB;
+      def _4 : Ii32PCRel<opc4, RawFrm, (outs), (ins brtarget:$dst), asm, [],
+                         IIC_Jcc>, TB, OpSize32;
+    }
   }
 }

 defm JO  : ICBr<0x70, 0x80, "jo\t$dst" , X86_COND_O>;
-defm JNO : ICBr<0x71, 0x81, "jno\t$dst" , X86_COND_NO>;
+defm JNO : ICBr<0x71, 0x81, "jno\t$dst", X86_COND_NO>;
 defm JB  : ICBr<0x72, 0x82, "jb\t$dst" , X86_COND_B>;
 defm JAE : ICBr<0x73, 0x83, "jae\t$dst", X86_COND_AE>;
 defm JE  : ICBr<0x74, 0x84, "je\t$dst" , X86_COND_E>;
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 3c8208a2c27..d643258b108 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -2723,22 +2723,22 @@ bool X86InstrInfo::findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,

 static X86::CondCode getCondFromBranchOpc(unsigned BrOpc) {
   switch (BrOpc) {
   default: return X86::COND_INVALID;
-  case X86::JE_4:  return X86::COND_E;
-  case X86::JNE_4: return X86::COND_NE;
-  case X86::JL_4:  return X86::COND_L;
-  case X86::JLE_4: return X86::COND_LE;
-  case X86::JG_4:  return X86::COND_G;
-  case X86::JGE_4: return X86::COND_GE;
-  case X86::JB_4:  return X86::COND_B;
-  case X86::JBE_4: return X86::COND_BE;
-  case X86::JA_4:  return X86::COND_A;
-  case X86::JAE_4: return X86::COND_AE;
-  case X86::JS_4:  return X86::COND_S;
-  case X86::JNS_4: return X86::COND_NS;
-  case X86::JP_4:  return X86::COND_P;
-  case X86::JNP_4: return X86::COND_NP;
-  case X86::JO_4:  return X86::COND_O;
-  case X86::JNO_4: return X86::COND_NO;
+  case X86::JE_1:  return X86::COND_E;
+  case X86::JNE_1: return X86::COND_NE;
+  case X86::JL_1:  return X86::COND_L;
+  case X86::JLE_1: return X86::COND_LE;
+  case X86::JG_1:  return X86::COND_G;
+  case X86::JGE_1: return X86::COND_GE;
+  case X86::JB_1:  return X86::COND_B;
+  case X86::JBE_1: return X86::COND_BE;
+  case X86::JA_1:  return X86::COND_A;
+  case X86::JAE_1: return X86::COND_AE;
+  case X86::JS_1:  return X86::COND_S;
+  case X86::JNS_1: return X86::COND_NS;
+  case X86::JP_1:  return X86::COND_P;
+  case X86::JNP_1: return X86::COND_NP;
+  case X86::JO_1:  return X86::COND_O;
+  case X86::JNO_1: return X86::COND_NO;
   }
 }
@@ -2823,22 +2823,22 @@ X86::CondCode X86::getCondFromCMovOpc(unsigned Opc) {

 unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
   switch (CC) {
   default: llvm_unreachable("Illegal condition code!");
-  case X86::COND_E:  return X86::JE_4;
-  case X86::COND_NE: return X86::JNE_4;
-  case X86::COND_L:  return X86::JL_4;
-  case X86::COND_LE: return X86::JLE_4;
-  case X86::COND_G:  return X86::JG_4;
-  case X86::COND_GE: return X86::JGE_4;
-  case X86::COND_B:  return X86::JB_4;
-  case X86::COND_BE: return X86::JBE_4;
-  case X86::COND_A:  return X86::JA_4;
-  case X86::COND_AE: return X86::JAE_4;
-  case X86::COND_S:  return X86::JS_4;
-  case X86::COND_NS: return X86::JNS_4;
-  case X86::COND_P:  return X86::JP_4;
-  case X86::COND_NP: return X86::JNP_4;
-  case X86::COND_O:  return X86::JO_4;
-  case X86::COND_NO: return X86::JNO_4;
+  case X86::COND_E:  return X86::JE_1;
+  case X86::COND_NE: return X86::JNE_1;
+  case X86::COND_L:  return X86::JL_1;
+  case X86::COND_LE: return X86::JLE_1;
+  case X86::COND_G:  return X86::JG_1;
+  case X86::COND_GE: return X86::JGE_1;
+  case X86::COND_B:  return X86::JB_1;
+  case X86::COND_BE: return X86::JBE_1;
+  case X86::COND_A:  return X86::JA_1;
+  case X86::COND_AE: return X86::JAE_1;
+  case X86::COND_S:  return X86::JS_1;
+  case X86::COND_NS: return X86::JNS_1;
+  case X86::COND_P:  return X86::JP_1;
+  case X86::COND_NP: return X86::JNP_1;
+  case X86::COND_O:  return X86::JO_1;
+  case X86::COND_NO: return X86::JNO_1;
   }
 }
@@ -2996,7 +2996,7 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
       return true;

     // Handle unconditional branches.
-    if (I->getOpcode() == X86::JMP_4) {
+    if (I->getOpcode() == X86::JMP_1) {
       UnCondBrIter = I;

       if (!AllowModify) {
@@ -3058,7 +3058,7 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,

         BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(JNCC))
           .addMBB(UnCondBrIter->getOperand(0).getMBB());
-        BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_4))
+        BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_1))
           .addMBB(TargetBB);

         OldInst->eraseFromParent();
@@ -3123,7 +3123,7 @@ unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
     --I;
     if (I->isDebugValue())
       continue;
-    if (I->getOpcode() != X86::JMP_4 &&
+    if (I->getOpcode() != X86::JMP_1 &&
         getCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
       break;
     // Remove the branch.
@@ -3148,7 +3148,7 @@ X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
   if (Cond.empty()) {
     // Unconditional branch?
     assert(!FBB && "Unconditional branch with multiple successors!");
-    BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(TBB);
+    BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(TBB);
     return 1;
   }

@@ -3158,16 +3158,16 @@ X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
   switch (CC) {
   case X86::COND_NP_OR_E:
     // Synthesize NP_OR_E with two branches.
-    BuildMI(&MBB, DL, get(X86::JNP_4)).addMBB(TBB);
+    BuildMI(&MBB, DL, get(X86::JNP_1)).addMBB(TBB);
     ++Count;
-    BuildMI(&MBB, DL, get(X86::JE_4)).addMBB(TBB);
+    BuildMI(&MBB, DL, get(X86::JE_1)).addMBB(TBB);
     ++Count;
     break;
   case X86::COND_NE_OR_P:
     // Synthesize NE_OR_P with two branches.
-    BuildMI(&MBB, DL, get(X86::JNE_4)).addMBB(TBB);
+    BuildMI(&MBB, DL, get(X86::JNE_1)).addMBB(TBB);
     ++Count;
-    BuildMI(&MBB, DL, get(X86::JP_4)).addMBB(TBB);
+    BuildMI(&MBB, DL, get(X86::JP_1)).addMBB(TBB);
     ++Count;
     break;
   default: {
@@ -3178,7 +3178,7 @@ X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
   }
   if (FBB) {
     // Two-way Conditional branch. Insert the second branch.
-    BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(FBB);
+    BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB);
     ++Count;
   }
   return Count;
@@ -5344,26 +5344,26 @@ bool X86InstrInfo::shouldScheduleAdjacent(MachineInstr* First,
   switch(Second->getOpcode()) {
   default:
     return false;
-  case X86::JE_4:
-  case X86::JNE_4:
-  case X86::JL_4:
-  case X86::JLE_4:
-  case X86::JG_4:
-  case X86::JGE_4:
+  case X86::JE_1:
+  case X86::JNE_1:
+  case X86::JL_1:
+  case X86::JLE_1:
+  case X86::JG_1:
+  case X86::JGE_1:
     FuseKind = FuseInc;
     break;
-  case X86::JB_4:
-  case X86::JBE_4:
-  case X86::JA_4:
-  case X86::JAE_4:
+  case X86::JB_1:
+  case X86::JBE_1:
+  case X86::JA_1:
+  case X86::JAE_1:
     FuseKind = FuseCmp;
     break;
-  case X86::JS_4:
-  case X86::JNS_4:
-  case X86::JP_4:
-  case X86::JNP_4:
-  case X86::JO_4:
-  case X86::JNO_4:
+  case X86::JS_1:
+  case X86::JNS_1:
+  case X86::JP_1:
+  case X86::JNP_1:
+  case X86::JO_1:
+  case X86::JNO_1:
     FuseKind = FuseTest;
     break;
   }
@@ -5652,7 +5652,7 @@ void X86InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
 // getUnconditionalBranch and getTrap.
 void X86InstrInfo::getUnconditionalBranch(
     MCInst &Branch, const MCSymbolRefExpr *BranchTarget) const {
-  Branch.setOpcode(X86::JMP_4);
+  Branch.setOpcode(X86::JMP_1);
   Branch.addOperand(MCOperand::CreateExpr(BranchTarget));
 }

diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp
index 9d9bb084719..759416d6124 100644
--- a/lib/Target/X86/X86MCInstLower.cpp
+++ b/lib/Target/X86/X86MCInstLower.cpp
@@ -558,28 +558,6 @@ ReSimplify:
   case X86::ADD32ri8_DB:  OutMI.setOpcode(X86::OR32ri8); goto ReSimplify;
   case X86::ADD64ri8_DB:  OutMI.setOpcode(X86::OR64ri8); goto ReSimplify;

-  // The assembler backend wants to see branches in their small form and relax
-  // them to their large form. The JIT can only handle the large form because
-  // it does not do relaxation.  For now, translate the large form to the
-  // small one here.
-  case X86::JMP_4: OutMI.setOpcode(X86::JMP_1); break;
-  case X86::JO_4:  OutMI.setOpcode(X86::JO_1); break;
-  case X86::JNO_4: OutMI.setOpcode(X86::JNO_1); break;
-  case X86::JB_4:  OutMI.setOpcode(X86::JB_1); break;
-  case X86::JAE_4: OutMI.setOpcode(X86::JAE_1); break;
-  case X86::JE_4:  OutMI.setOpcode(X86::JE_1); break;
-  case X86::JNE_4: OutMI.setOpcode(X86::JNE_1); break;
-  case X86::JBE_4: OutMI.setOpcode(X86::JBE_1); break;
-  case X86::JA_4:  OutMI.setOpcode(X86::JA_1); break;
-  case X86::JS_4:  OutMI.setOpcode(X86::JS_1); break;
-  case X86::JNS_4: OutMI.setOpcode(X86::JNS_1); break;
-  case X86::JP_4:  OutMI.setOpcode(X86::JP_1); break;
-  case X86::JNP_4: OutMI.setOpcode(X86::JNP_1); break;
-  case X86::JL_4:  OutMI.setOpcode(X86::JL_1); break;
-  case X86::JGE_4: OutMI.setOpcode(X86::JGE_1); break;
-  case X86::JLE_4: OutMI.setOpcode(X86::JLE_1); break;
-  case X86::JG_4:  OutMI.setOpcode(X86::JG_1); break;
-
   // Atomic load and store require a separate pseudo-inst because Acquire
   // implies mayStore and Release implies mayLoad; fix these to regular MOV
   // instructions here
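
Note on the X86MCInstLower.cpp deletion: the removed switch was the old workaround in which the compiler always produced the 4-byte Jcc/JMP forms and translated them to the 1-byte forms at lowering time, so that the MC assembler backend could relax them back to the wide encodings only where needed. With the rest of the patch making the compiler pick the _1 opcodes directly (see X86InstrInfo.cpp and X86InstrControl.td above), that translation step becomes dead. For illustration only, here is a minimal sketch of the emission pattern the patch standardizes on, built from the same MC calls that appear in the X86AsmInstrumentation.cpp hunks; the helper name emitShortJccSkip and its free-standing form are illustrative and not part of the patch.

#include "MCTargetDesc/X86MCTargetDesc.h" // in-tree relative include for X86::JE_1
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"

using namespace llvm;

// Illustrative helper (not in the patch): emit "je <tmp>" using the short,
// 8-bit-displacement JE_1 form and return the temporary label so the caller
// can bind it with Out.EmitLabel(Done) once the skipped code has been emitted.
static MCSymbol *emitShortJccSkip(MCStreamer &Out, MCContext &Ctx,
                                  const MCSubtargetInfo &STI) {
  MCSymbol *Done = Ctx.CreateTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::Create(Done, Ctx);
  // JE_1 carries an 8-bit pc-relative fixup. If the final distance to Done
  // turns out not to fit, the assembler backend is expected to relax the
  // branch to a wider encoding, so producers no longer pick JE_4 up front.
  Out.EmitInstruction(MCInstBuilder(X86::JE_1).addExpr(DoneExpr), STI);
  return Done;
}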