From e5fffe9c3fa402cb5d5167327783f82b86f52b8f Mon Sep 17 00:00:00 2001
From: NAKAMURA Takumi
Date: Wed, 26 Jan 2011 02:03:37 +0000
Subject: [PATCH] Fix whitespace.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@124270 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelLowering.cpp    |  94 ++++++++++-----------
 lib/Target/X86/X86InstrCompiler.td    |  17 ++--
 lib/Target/X86/X86InstrControl.td     |  39 +++++----
 lib/Target/X86/X86InstrInfo.cpp       |  84 +++++++++----------
 lib/Target/X86/X86InstrInfo.td        |   3 +-
 lib/Target/X86/X86RegisterInfo.td     |  24 +++---
 test/CodeGen/X86/tailcallstack64.ll   |   1 -
 utils/TableGen/CodeGenInstruction.cpp | 116 +++++++++++------------
 8 files changed, 187 insertions(+), 191 deletions(-)

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index f69ab6da083..edd535e43cd 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -69,7 +69,7 @@ static TargetLoweringObjectFile *createTLOF(X86TargetMachine &TM) {
       return new X8664_MachoTargetObjectFile();
     return new TargetLoweringObjectFileMachO();
   }
- 
+
   if (TM.getSubtarget<X86Subtarget>().isTargetELF() ){
     if (is64Bit)
       return new X8664_ELFTargetObjectFile(TM);
@@ -256,7 +256,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
     setOperationAction(ISD::UDIV, VT, Expand);
     setOperationAction(ISD::SREM, VT, Expand);
     setOperationAction(ISD::UREM, VT, Expand);
- 
+
     // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
     setOperationAction(ISD::ADDC, VT, Custom);
     setOperationAction(ISD::ADDE, VT, Custom);
@@ -369,7 +369,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
     setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Custom);
     setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
   }
- 
+
   if (!Subtarget->is64Bit()) {
     setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i64, Custom);
     setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
@@ -931,7 +931,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
 
   // We want to custom lower some of our intrinsics.
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
- 
+
   // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
   // handle type legalization for these operations here.
   //
@@ -948,7 +948,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
     setOperationAction(ISD::SMULO, VT, Custom);
     setOperationAction(ISD::UMULO, VT, Custom);
   }
- 
+
   // There are no 8-bit 3-address imul/mul instructions
   setOperationAction(ISD::SMULO, MVT::i8, Expand);
   setOperationAction(ISD::UMULO, MVT::i8, Expand);
@@ -6198,7 +6198,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
     // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
     MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
     MFI->setAdjustsStack(true);
- 
+
     // And our return value (tls address) is in the standard call return value
     // location.
     unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
@@ -7047,7 +7047,7 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
       (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
        cast<ConstantSDNode>(Op1)->isNullValue()) &&
       (CC == ISD::SETEQ || CC == ISD::SETNE)) {
- 
+
     // If the input is a setcc, then reuse the input setcc or use a new one with
     // the inverted condition.
    if (Op0.getOpcode() == X86ISD::SETCC) {
@@ -7055,7 +7055,7 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
      bool Invert = (CC == ISD::SETNE) ^
        cast<ConstantSDNode>(Op1)->isNullValue();
      if (!Invert) return Op0;
- 
+
      CCode = X86::GetOppositeBranchCondition(CCode);
      return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                         DAG.getConstant(CCode, MVT::i8), Op0.getOperand(1));
@@ -7206,7 +7206,7 @@ static bool isX86LogicalCmp(SDValue Op) {
 
  if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
    return true;
- 
+
  return false;
 }
 
@@ -7242,24 +7242,24 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
      Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
      isZero(Cond.getOperand(1).getOperand(1))) {
    SDValue Cmp = Cond.getOperand(1);
- 
+
    unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
- 
-    if ((isAllOnes(Op1) || isAllOnes(Op2)) && 
+
+    if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
        (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
      SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
      SDValue CmpOp0 = Cmp.getOperand(0);
      Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
                        CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
- 
+
      SDValue Res =   // Res = 0 or -1.
        DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
                    DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
- 
+
      if (isAllOnes(Op1) != (CondCode == X86::COND_E))
        Res = DAG.getNOT(DL, Res, Res.getValueType());
- 
+
      ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
      if (N2C == 0 || !N2C->isNullValue())
        Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
@@ -8443,7 +8443,7 @@ SDValue X86TargetLowering::LowerSHL(SDValue Op, SelectionDAG &DAG) const {
    Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
 
    // return pblendv(r, r+r, a);
-    R = DAG.getNode(X86ISD::PBLENDVB, dl, VT, 
+    R = DAG.getNode(X86ISD::PBLENDVB, dl, VT,
                    R, DAG.getNode(ISD::ADD, dl, VT, R, R), Op);
    return R;
  }
@@ -8503,12 +8503,12 @@ SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
    SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
                                 MVT::i32);
    SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
- 
+
    SDValue SetCC =
      DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                  DAG.getConstant(X86::COND_O, MVT::i32),
                  SDValue(Sum.getNode(), 2));
- 
+
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SetCC);
    return Sum;
  }
@@ -8663,9 +8663,9 @@ static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();
- 
+
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
- 
+
  unsigned Opc;
  bool ExtraOp = false;
  switch (Op.getOpcode()) {
@@ -8675,7 +8675,7 @@ static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
  case ISD::SUBC: Opc = X86ISD::SUB; break;
  case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
  }
- 
+
  if (!ExtraOp)
    return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0),
                       Op.getOperand(1));
@@ -9555,14 +9555,14 @@ MachineBasicBlock *
 X86TargetLowering::EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB) const {
  DebugLoc dl = MI->getDebugLoc();
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- 
+
  // Address into RAX/EAX, other two args into ECX, EDX.
  unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
  unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
  MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
  for (int i = 0; i < X86::AddrNumOperands; ++i)
    MIB.addOperand(MI->getOperand(i));
- 
+
  unsigned ValOps = X86::AddrNumOperands;
  BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
    .addReg(MI->getOperand(ValOps).getReg());
@@ -9571,7 +9571,7 @@ X86TargetLowering::EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB) const {
 
  // The instruction doesn't actually take any operands though.
  BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
- 
+
  MI->eraseFromParent(); // The pseudo is gone now.
  return BB;
 }
@@ -9580,16 +9580,16 @@ MachineBasicBlock *
 X86TargetLowering::EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const {
  DebugLoc dl = MI->getDebugLoc();
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
- 
+
  // First arg in ECX, the second in EAX.
  BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
    .addReg(MI->getOperand(0).getReg());
  BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EAX)
    .addReg(MI->getOperand(1).getReg());
- 
+
  // The instruction doesn't actually take any operands though.
  BuildMI(*BB, MI, dl, TII->get(X86::MWAITrr));
- 
+
  MI->eraseFromParent(); // The pseudo is gone now.
  return BB;
 }
@@ -10195,7 +10195,7 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
 
  // Thread synchronization.
  case X86::MONITOR:
-    return EmitMonitor(MI, BB); 
+    return EmitMonitor(MI, BB);
  case X86::MWAIT:
    return EmitMwait(MI, BB);
 
@@ -11116,19 +11116,19 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget *Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();
- 
+
  // Want to form PANDN nodes, in the hopes of then easily combining them with
  // OR and AND nodes to form PBLEND/PSIGN.
  EVT VT = N->getValueType(0);
  if (VT != MVT::v2i64)
    return SDValue();
- 
+
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  DebugLoc DL = N->getDebugLoc();
- 
+
  // Check LHS for vnot
-  if (N0.getOpcode() == ISD::XOR && 
+  if (N0.getOpcode() == ISD::XOR &&
      ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
    return DAG.getNode(X86ISD::PANDN, DL, VT, N0.getOperand(0), N1);
@@ -11136,7 +11136,7 @@ static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
  if (N1.getOpcode() == ISD::XOR &&
      ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
    return DAG.getNode(X86ISD::PANDN, DL, VT, N1.getOperand(0), N0);
- 
+
  return SDValue();
 }
 
@@ -11152,7 +11152,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
 
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
- 
+
  // look for psign/blend
  if (Subtarget->hasSSSE3()) {
    if (VT == MVT::v2i64) {
@@ -11168,17 +11168,17 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
          Y = N0.getOperand(1);
        if (N0.getOperand(1) == Mask)
          Y = N0.getOperand(0);
- 
+
        // Check to see if the mask appeared in both the AND and PANDN and
        if (!Y.getNode())
          return SDValue();
- 
+
        // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
        if (Mask.getOpcode() != ISD::BITCAST ||
            X.getOpcode() != ISD::BITCAST ||
            Y.getOpcode() != ISD::BITCAST)
          return SDValue();
- 
+
        // Look through mask bitcast.
        Mask = Mask.getOperand(0);
        EVT MaskVT = Mask.getValueType();
@@ -11187,7 +11187,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
        // will be an intrinsic.
        if (Mask.getOpcode() != ISD::INTRINSIC_WO_CHAIN)
          return SDValue();
- 
+
        // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
        // there is no psrai.b
        switch (cast<ConstantSDNode>(Mask.getOperand(0))->getZExtValue()) {
@@ -11196,14 +11196,14 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
          break;
        default: return SDValue();
        }
- 
+
        // Check that the SRA is all signbits.
        SDValue SraC = Mask.getOperand(2);
        unsigned SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
        unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
        if ((SraAmt + 1) != EltBits)
          return SDValue();
- 
+
        DebugLoc DL = N->getDebugLoc();
 
        // Now we know we at least have a plendvb with the mask val.  See if
@@ -11229,7 +11229,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
        // PBLENDVB only available on SSE 4.1
        if (!Subtarget->hasSSE41())
          return SDValue();
- 
+
        X = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, X);
        Y = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Y);
        Mask = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Mask);
@@ -11238,7 +11238,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
      }
    }
  }
- 
+
  // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
  if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
    std::swap(N0, N1);
@@ -11290,7 +11290,7 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
                         DAG.getNode(ISD::TRUNCATE, DL,
                                       MVT::i8, ShAmt0));
  }
- 
+
  return SDValue();
 }
 
@@ -11500,7 +11500,7 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG) {
  unsigned X86CC = N->getConstantOperandVal(0);
  SDValue EFLAG = N->getOperand(1);
  DebugLoc DL = N->getDebugLoc();
- 
+
  // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
  // a zext and produces an all-ones bit which is more useful than 0/1 in some
  // cases.
@@ -11509,10 +11509,10 @@ static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG) {
                       DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
                                   DAG.getConstant(X86CC, MVT::i8), EFLAG),
                       DAG.getConstant(1, MVT::i8));
- 
+
  return SDValue();
 }
- 
+
 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
                                 X86TargetLowering::DAGCombinerInfo &DCI) {
@@ -11544,7 +11544,7 @@ static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
 // (sub (setne X, 0), Y) -> adc -1, Y
 static SDValue OptimizeConditonalInDecrement(SDNode *N, SelectionDAG &DAG) {
  DebugLoc DL = N->getDebugLoc();
- 
+
  // Look through ZExts.
  SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
  if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
diff --git a/lib/Target/X86/X86InstrCompiler.td b/lib/Target/X86/X86InstrCompiler.td
index 4f60de0faf4..70478b8e4a9 100644
--- a/lib/Target/X86/X86InstrCompiler.td
+++ b/lib/Target/X86/X86InstrCompiler.td
@@ -849,38 +849,38 @@ def : Pat<(X86call (i64 texternalsym:$dst)),
 
 // tailcall stuff
 def : Pat<(X86tcret GR32_TC:$dst, imm:$off),
          (TCRETURNri GR32_TC:$dst, imm:$off)>,
-          Requires<[In32BitMode]>; 
+          Requires<[In32BitMode]>;
 
 // FIXME: This is disabled for 32-bit PIC mode because the global base
 // register which is part of the address mode may be assigned a
 // callee-saved register.
 def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi addr:$dst, imm:$off)>,
-          Requires<[In32BitMode, IsNotPIC]>; 
+          Requires<[In32BitMode, IsNotPIC]>;
 
 def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
-          Requires<[In32BitMode]>; 
+          Requires<[In32BitMode]>;
 
 def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
          (TCRETURNdi texternalsym:$dst, imm:$off)>,
-          Requires<[In32BitMode]>; 
+          Requires<[In32BitMode]>;
 
 def : Pat<(X86tcret GR64_TC:$dst, imm:$off),
          (TCRETURNri64 GR64_TC:$dst, imm:$off)>,
-          Requires<[In64BitMode]>; 
+          Requires<[In64BitMode]>;
 
 def : Pat<(X86tcret (load addr:$dst), imm:$off),
          (TCRETURNmi64 addr:$dst, imm:$off)>,
-          Requires<[In64BitMode]>; 
+          Requires<[In64BitMode]>;
 
 def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
          (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
-          Requires<[In64BitMode]>; 
+          Requires<[In64BitMode]>;
 
 def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
          (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
-          Requires<[In64BitMode]>; 
+          Requires<[In64BitMode]>;
 
 // Normal calls, with various flavors of addresses.
 def : Pat<(X86call (i32 tglobaladdr:$dst)),
@@ -1661,4 +1661,3 @@ def : Pat<(and GR64:$src1, i64immSExt8:$src2),
          (AND64ri8 GR64:$src1, i64immSExt8:$src2)>;
 def : Pat<(and GR64:$src1, i64immSExt32:$src2),
          (AND64ri32 GR64:$src1, i64immSExt32:$src2)>;
-
diff --git a/lib/Target/X86/X86InstrControl.td b/lib/Target/X86/X86InstrControl.td
index 62ab53eb09f..4d1c5f74091 100644
--- a/lib/Target/X86/X86InstrControl.td
+++ b/lib/Target/X86/X86InstrControl.td
@@ -1,10 +1,10 @@
 //===- X86InstrControl.td - Control Flow Instructions ------*- tablegen -*-===//
-// 
+//
 // The LLVM Compiler Infrastructure
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
-// 
+//
 //===----------------------------------------------------------------------===//
 //
 // This file describes the X86 jump, return, call, and related instructions.
@@ -43,7 +43,7 @@ let isBarrier = 1, isBranch = 1, isTerminator = 1 in {
                     "jmp\t$dst", [(br bb:$dst)]>;
  def JMP_1 : Ii8PCRel<0xEB, RawFrm, (outs), (ins brtarget8:$dst),
                       "jmp\t$dst", []>;
-  def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst), 
+  def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst),
                       "jmp{q}\t$dst", []>;
 }
@@ -108,16 +108,16 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
  def JMP64m     : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst",
                     [(brind (loadi64 addr:$dst))]>, Requires<[In64BitMode]>;
 
-  def FARJMP16i  : Iseg16<0xEA, RawFrmImm16, (outs), 
+  def FARJMP16i  : Iseg16<0xEA, RawFrmImm16, (outs),
                          (ins i16imm:$off, i16imm:$seg),
                          "ljmp{w}\t{$seg, $off|$off, $seg}", []>, OpSize;
  def FARJMP32i  : Iseg32<0xEA, RawFrmImm16, (outs),
                          (ins i32imm:$off, i16imm:$seg),
-                          "ljmp{l}\t{$seg, $off|$off, $seg}", []>; 
+                          "ljmp{l}\t{$seg, $off|$off, $seg}", []>;
 
  def FARJMP64   : RI<0xFF, MRM5m, (outs), (ins opaque80mem:$dst),
                      "ljmp{q}\t{*}$dst", []>;
 
-  def FARJMP16m  : I<0xFF, MRM5m, (outs), (ins opaque32mem:$dst), 
+  def FARJMP16m  : I<0xFF, MRM5m, (outs), (ins opaque32mem:$dst),
                     "ljmp{w}\t{*}$dst", []>, OpSize;
  def FARJMP32m  : I<0xFF, MRM5m, (outs), (ins opaque48mem:$dst),
                     "ljmp{l}\t{*}$dst", []>;
@@ -152,14 +152,14 @@ let isCall = 1 in
    def CALL32m     : I<0xFF, MRM2m, (outs), (ins i32mem:$dst, variable_ops),
                        "call{l}\t{*}$dst", [(X86call (loadi32 addr:$dst))]>,
                        Requires<[In32BitMode]>;
- 
-    def FARCALL16i  : Iseg16<0x9A, RawFrmImm16, (outs), 
+
+    def FARCALL16i  : Iseg16<0x9A, RawFrmImm16, (outs),
                             (ins i16imm:$off, i16imm:$seg),
                             "lcall{w}\t{$seg, $off|$off, $seg}", []>, OpSize;
    def FARCALL32i  : Iseg32<0x9A, RawFrmImm16, (outs),
                             (ins i32imm:$off, i16imm:$seg),
                             "lcall{l}\t{$seg, $off|$off, $seg}", []>;
- 
+
    def FARCALL16m  : I<0xFF, MRM3m, (outs), (ins opaque32mem:$dst),
                        "lcall{w}\t{*}$dst", []>, OpSize;
    def FARCALL32m  : I<0xFF, MRM3m, (outs), (ins opaque48mem:$dst),
@@ -182,12 +182,12 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, XMM8, XMM9,
              XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
    Uses = [ESP] in {
-  def TCRETURNdi : PseudoI<(outs), 
+  def TCRETURNdi : PseudoI<(outs),
                           (ins i32imm_pcrel:$dst, i32imm:$offset, variable_ops), []>;
-  def TCRETURNri : PseudoI<(outs), 
+  def TCRETURNri : PseudoI<(outs),
                           (ins GR32_TC:$dst, i32imm:$offset, variable_ops), []>;
  let mayLoad = 1 in
-  def TCRETURNmi : PseudoI<(outs), 
+  def TCRETURNmi : PseudoI<(outs),
                           (ins i32mem_TC:$dst, i32imm:$offset, variable_ops), []>;
 
  // FIXME: The should be pseudo instructions that are lowered when going to
@@ -196,7 +196,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
                   (ins i32imm_pcrel:$dst, variable_ops),
                   "jmp\t$dst # TAILCALL",
                   []>;
-  def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32_TC:$dst, variable_ops), 
+  def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32_TC:$dst, variable_ops),
                   "", []>;  // FIXME: Remove encoding when JIT is dead.
  let mayLoad = 1 in
  def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem_TC:$dst, variable_ops),
@@ -218,7 +218,7 @@ let isCall = 1 in
              XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, XMM8, XMM9,
              XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
      Uses = [RSP] in {
- 
+
    // NOTE: this pattern doesn't match "X86call imm", because we do not know
    // that the offset between an arbitrary immediate and the call will fit in
    // the 32-bit pcrel field that we have.
@@ -232,12 +232,12 @@ let isCall = 1 in
    def CALL64m     : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops),
                        "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))]>,
                        Requires<[In64BitMode, NotWin64]>;
- 
+
    def FARCALL64   : RI<0xFF, MRM3m, (outs), (ins opaque80mem:$dst),
                         "lcall{q}\t{*}$dst", []>;
  }
 
-  // FIXME: We need to teach codegen about single list of call-clobbered 
+  // FIXME: We need to teach codegen about single list of call-clobbered
  // registers.
  let isCall = 1, isCodeGenOnly = 1 in
    // All calls clobber the non-callee saved registers. RSP is marked as
@@ -256,10 +256,10 @@ let isCall = 1, isCodeGenOnly = 1 in
    def WINCALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops),
                       "call{q}\t{*}$dst", [(X86call GR64:$dst)]>,
                       Requires<[IsWin64]>;
-    def WINCALL64m : I<0xFF, MRM2m, (outs), 
+    def WINCALL64m : I<0xFF, MRM2m, (outs),
                       (ins i64mem:$dst,variable_ops), "call{q}\t{*}$dst",
-                       [(X86call (loadi64 addr:$dst))]>, 
+                       [(X86call (loadi64 addr:$dst))]>,
                       Requires<[IsWin64]>;
  }
 
@@ -278,7 +278,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
  def TCRETURNri64 : PseudoI<(outs),
                             (ins GR64_TC:$dst, i32imm:$offset, variable_ops), []>;
  let mayLoad = 1 in
-  def TCRETURNmi64 : PseudoI<(outs), 
+  def TCRETURNmi64 : PseudoI<(outs),
                             (ins i64mem_TC:$dst, i32imm:$offset, variable_ops), []>;
 
  def TAILJMPd64 : Ii32PCRel<0xE9, RawFrm, (outs),
@@ -291,4 +291,3 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
  def TAILJMPm64 : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst, variable_ops),
                   "jmp{q}\t{*}$dst # TAILCALL", []>;
 }
-
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 73654d322fe..63dcd143b5d 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -58,7 +58,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
    TB_NOT_REVERSABLE = 1U << 31,
    TB_FLAGS = TB_NOT_REVERSABLE
  };
- 
+
  static const unsigned OpTbl2Addr[][2] = {
    { X86::ADC32ri,   X86::ADC32mi },
    { X86::ADC32ri8,  X86::ADC32mi8 },
@@ -231,16 +231,16 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
    unsigned MemOp = OpTbl2Addr[i][1] & ~TB_FLAGS;
    assert(!RegOp2MemOpTable2Addr.count(RegOp) && "Duplicated entries?");
    RegOp2MemOpTable2Addr[RegOp] = std::make_pair(MemOp, 0U);
- 
+
    // If this is not a reversable operation (because there is a many->one)
    // mapping, don't insert the reverse of the operation into MemOp2RegOpTable.
    if (OpTbl2Addr[i][1] & TB_NOT_REVERSABLE)
      continue;
- 
+
    // Index 0, folded load and store, no alignment requirement.
    unsigned AuxInfo = 0 | (1 << 4) | (1 << 5);
- 
-    assert(!MemOp2RegOpTable.count(MemOp) && 
+
+    assert(!MemOp2RegOpTable.count(MemOp) &&
           "Duplicated entries in unfolding maps?");
    MemOp2RegOpTable[MemOp] = std::make_pair(RegOp, AuxInfo);
  }
@@ -334,12 +334,12 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
    unsigned Align = OpTbl0[i][3];
    assert(!RegOp2MemOpTable0.count(RegOp) && "Duplicated entries?");
    RegOp2MemOpTable0[RegOp] = std::make_pair(MemOp, Align);
- 
+
    // If this is not a reversable operation (because there is a many->one)
    // mapping, don't insert the reverse of the operation into MemOp2RegOpTable.
    if (OpTbl0[i][1] & TB_NOT_REVERSABLE)
      continue;
- 
+
    // Index 0, folded load or store.
    unsigned AuxInfo = 0 | (FoldedLoad << 4) | ((FoldedLoad^1) << 5);
    assert(!MemOp2RegOpTable.count(MemOp) && "Duplicated entries?");
@@ -461,12 +461,12 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
    unsigned Align = OpTbl1[i][2];
    assert(!RegOp2MemOpTable1.count(RegOp) && "Duplicate entries");
    RegOp2MemOpTable1[RegOp] = std::make_pair(MemOp, Align);
- 
+
    // If this is not a reversable operation (because there is a many->one)
    // mapping, don't insert the reverse of the operation into MemOp2RegOpTable.
    if (OpTbl1[i][1] & TB_NOT_REVERSABLE)
      continue;
- 
+
    // Index 1, folded load
    unsigned AuxInfo = 1 | (1 << 4);
    assert(!MemOp2RegOpTable.count(MemOp) && "Duplicate entries");
@@ -678,15 +678,15 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
    unsigned RegOp = OpTbl2[i][0];
    unsigned MemOp = OpTbl2[i][1] & ~TB_FLAGS;
    unsigned Align = OpTbl2[i][2];
- 
+
    assert(!RegOp2MemOpTable2.count(RegOp) && "Duplicate entry!");
    RegOp2MemOpTable2[RegOp] = std::make_pair(MemOp, Align);
- 
+
    // If this is not a reversable operation (because there is a many->one)
    // mapping, don't insert the reverse of the operation into MemOp2RegOpTable.
    if (OpTbl2[i][1] & TB_NOT_REVERSABLE)
      continue;
- 
+
    // Index 2, folded load
    unsigned AuxInfo = 2 | (1 << 4);
    assert(!MemOp2RegOpTable.count(MemOp) &&
@@ -808,7 +808,7 @@ static bool isFrameStoreOpcode(int Opcode) {
  return false;
 }
 
-unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI, 
+unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                           int &FrameIndex) const {
  if (isFrameLoadOpcode(MI->getOpcode()))
    if (MI->getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
@@ -816,7 +816,7 @@ unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
  return 0;
 }
 
-unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI, 
+unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                 int &FrameIndex) const {
  if (isFrameLoadOpcode(MI->getOpcode())) {
    unsigned Reg;
@@ -946,10 +946,10 @@ X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
        isPICBase = true;
      }
      return isPICBase;
-    } 
+    }
    return false;
  }
- 
+
  case X86::LEA32r:
  case X86::LEA64r: {
    if (MI->getOperand(2).isImm() &&
@@ -1124,9 +1124,9 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
  MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
  unsigned leaInReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
  unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
- 
+
  // Build and insert into an implicit UNDEF value. This is OK because
-  // well be shifting and then extracting the lower 16-bits. 
+  // well be shifting and then extracting the lower 16-bits.
  // This has the potential to cause partial register stall. e.g.
  //   movw    (%rbp,%rcx,2), %dx
  //   leal    -65(%rdx), %esi
@@ -1162,7 +1162,7 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
  case X86::ADD16ri8:
  case X86::ADD16ri_DB:
  case X86::ADD16ri8_DB:
-    addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm()); 
+    addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
    break;
  case X86::ADD16rr:
  case X86::ADD16rr_DB: {
@@ -1177,7 +1177,7 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
    } else {
      leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
      // Build and insert into an implicit UNDEF value. This is OK because
-      // well be shifting and then extracting the lower 16-bits. 
+      // well be shifting and then extracting the lower 16-bits.
      BuildMI(*MFI, MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg2);
      InsMI2 =
        BuildMI(*MFI, MIB, MI->getDebugLoc(), get(TargetOpcode::COPY))
@@ -1244,7 +1244,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
  case X86::SHUFPSrri: {
    assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
    if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;
- 
+
    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    if (B != C) return 0;
@@ -1392,7 +1392,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
      RC = X86::GR32_NOSPRegisterClass;
    }
 
- 
+
    unsigned Src2 = MI->getOperand(2).getReg();
    bool isKill2 = MI->getOperand(2).isKill();
 
@@ -1471,7 +1471,7 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
      LV->replaceKillInstruction(Dest, MI, NewMI);
  }
 
-  MFI->insert(MBBI, NewMI);          // Insert the new inst 
+  MFI->insert(MBBI, NewMI);          // Insert the new inst
  return NewMI;
 }
 
@@ -1692,7 +1692,7 @@ X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
 bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.isTerminator()) return false;
- 
+
  // Conditional branch is a special case.
  if (TID.isBranch() && !TID.isBarrier())
    return true;
@@ -1701,7 +1701,7 @@ bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  return !isPredicated(MI);
 }
 
-bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, 
+bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 SmallVectorImpl<MachineOperand> &Cond,
@@ -1862,7 +1862,7 @@ unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
    I = MBB.end();
    ++Count;
  }
- 
+
  return Count;
 }
 
@@ -2177,7 +2177,7 @@ static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
    MIB.addOperand(MOs[i]);
  if (NumAddrOps < 4)  // FrameIndex only
    addOffset(MIB, 0);
- 
+
  // Loop over the rest of the ri operands, converting them over.
  unsigned NumOps = MI->getDesc().getNumOperands()-2;
  for (unsigned i = 0; i != NumOps; ++i) {
@@ -2198,7 +2198,7 @@ static MachineInstr *FuseInst(MachineFunction &MF,
  MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
                                              MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(NewMI);
- 
+
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (i == OpNo) {
@@ -2247,7 +2247,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
  if (isTwoAddr && NumOps >= 2 && i < 2 &&
      MI->getOperand(0).isReg() &&
      MI->getOperand(1).isReg() &&
-      MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) { 
+      MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
    OpcodeTablePtr = &RegOp2MemOpTable2Addr;
    isTwoAddrFold = true;
  } else if (i == 0) { // If operand 0
@@ -2261,14 +2261,14 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
      NewMI = MakeM0Inst(*this, X86::MOV8mi, MOs, MI);
    if (NewMI)
      return NewMI;
- 
+
    OpcodeTablePtr = &RegOp2MemOpTable0;
  } else if (i == 1) {
    OpcodeTablePtr = &RegOp2MemOpTable1;
  } else if (i == 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2;
  }
- 
+
  // If table selected...
  if (OpcodeTablePtr) {
    // Find the Opcode to fuse
@@ -2316,8 +2316,8 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
      return NewMI;
    }
  }
- 
-  // No fusion 
+
+  // No fusion
  if (PrintFailedFusing && !MI->isCopy())
    dbgs() << "We failed to fuse operand " << i << " in " << *MI;
  return NULL;
@@ -2328,7 +2328,7 @@ MachineInstr*
 X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                    MachineInstr *MI,
                                    const SmallVectorImpl<unsigned> &Ops,
                                    int FrameIndex) const {
-  // Check switch flag 
+  // Check switch flag
  if (NoFusing) return NULL;
 
  if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
@@ -2380,7 +2380,7 @@ MachineInstr*
 X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                    MachineInstr *MI,
                                    const SmallVectorImpl<unsigned> &Ops,
                                    MachineInstr *LoadMI) const {
-  // Check switch flag 
+  // Check switch flag
  if (NoFusing) return NULL;
 
  if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
@@ -2523,13 +2523,13 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
 
 bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                  const SmallVectorImpl<unsigned> &Ops) const {
-  // Check switch flag 
+  // Check switch flag
  if (NoFusing) return 0;
 
  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    switch (MI->getOpcode()) {
    default: return false;
-    case X86::TEST8rr: 
+    case X86::TEST8rr:
    case X86::TEST16rr:
    case X86::TEST32rr:
    case X86::TEST64rr:
@@ -2550,7 +2550,7 @@ bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
  // instruction is different than folding it other places. It requires
  // replacing the *two* registers with the memory location.
  const DenseMap<unsigned, std::pair<unsigned,unsigned> > *OpcodeTablePtr = 0;
-  if (isTwoAddr && NumOps >= 2 && OpNum < 2) { 
+  if (isTwoAddr && NumOps >= 2 && OpNum < 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2Addr;
  } else if (OpNum == 0) { // If operand 0
    switch (Opc) {
@@ -2566,7 +2566,7 @@ bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
  } else if (OpNum == 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2;
  }
- 
+
  if (OpcodeTablePtr && OpcodeTablePtr->count(Opc))
    return true;
  return TargetInstrInfoImpl::canFoldMemoryOperand(MI, Ops);
@@ -2636,7 +2636,7 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
  // Emit the data processing instruction.
  MachineInstr *DataMI = MF.CreateMachineInstr(TID, MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(DataMI);
- 
+
  if (FoldedStore)
    MIB.addReg(Reg, RegState::Define);
  for (unsigned i = 0, e = BeforeOps.size(); i != e; ++i)
@@ -3156,11 +3156,11 @@ namespace {
      PC = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
    else
      PC = GlobalBaseReg;
- 
+
    // Operand of MovePCtoStack is completely ignored by asm printer. It's
    // only used in JIT code emission as displacement to pc.
    BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
- 
+
    // If we're using vanilla 'GOT' PIC style, we should use relative addressing
    // not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
    if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT()) {
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index f9c0a7bbfb3..4748f130ed8 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -36,7 +36,7 @@ def SDTBinaryArithWithFlags : SDTypeProfile<2, 2,
                                            SDTCisSameAs<0, 3>,
                                            SDTCisInt<0>, SDTCisVT<1, i32>]>;
 
-// SDTBinaryArithWithFlagsInOut - RES1, EFLAGS = op LHS, RHS, EFLAGS 
+// SDTBinaryArithWithFlagsInOut - RES1, EFLAGS = op LHS, RHS, EFLAGS
 def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
                                                 [SDTCisSameAs<0, 2>,
                                                  SDTCisSameAs<0, 3>,
@@ -1612,4 +1612,3 @@ def : InstAlias<"xchgb $mem, $val", (XCHG8rm GR8 :$val, i8mem :$mem)>;
 def : InstAlias<"xchgw $mem, $val", (XCHG16rm GR16:$val, i16mem:$mem)>;
 def : InstAlias<"xchgl $mem, $val", (XCHG32rm GR32:$val, i32mem:$mem)>;
 def : InstAlias<"xchgq $mem, $val", (XCHG64rm GR64:$val, i64mem:$mem)>;
-
diff --git a/lib/Target/X86/X86RegisterInfo.td b/lib/Target/X86/X86RegisterInfo.td
index dc4c042719b..45bb9898b84 100644
--- a/lib/Target/X86/X86RegisterInfo.td
+++ b/lib/Target/X86/X86RegisterInfo.td
@@ -1,10 +1,10 @@
 //===- X86RegisterInfo.td - Describe the X86 Register File --*- tablegen -*-==//
-// 
+//
 // The LLVM Compiler Infrastructure
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
-// 
+//
 //===----------------------------------------------------------------------===//
 //
 // This file describes the X86 Register file, defining the registers themselves,
@@ -34,8 +34,8 @@ let Namespace = "X86" in {
  // because the register file generator is smart enough to figure out that
  // AL aliases AX if we tell it that AX aliased AL (for example).
 
-  // Dwarf numbering is different for 32-bit and 64-bit, and there are 
-  // variations by target as well. Currently the first entry is for X86-64, 
+  // Dwarf numbering is different for 32-bit and 64-bit, and there are
+  // variations by target as well. Currently the first entry is for X86-64,
  // second - for EH on X86-32/Darwin and third is 'generic' one (X86-32/Linux
  // and debug information on X86-32/Darwin)
@@ -81,7 +81,7 @@ let Namespace = "X86" in {
  def SP : RegisterWithSubRegs<"sp", [SPL]>, DwarfRegNum<[7, 5, 4]>;
  }
  def IP : Register<"ip">, DwarfRegNum<[16]>;
- 
+
  // X86-64 only
  let SubRegIndices = [sub_8bit] in {
  def R8W : RegisterWithSubRegs<"r8w", [R8B]>, DwarfRegNum<[8, -2, -2]>;
@@ -103,8 +103,8 @@ let Namespace = "X86" in {
  def EDI : RegisterWithSubRegs<"edi", [DI]>, DwarfRegNum<[5, 7, 7]>;
  def EBP : RegisterWithSubRegs<"ebp", [BP]>, DwarfRegNum<[6, 4, 5]>;
  def ESP : RegisterWithSubRegs<"esp", [SP]>, DwarfRegNum<[7, 5, 4]>;
-  def EIP : RegisterWithSubRegs<"eip", [IP]>, DwarfRegNum<[16, 8, 8]>; 
- 
+  def EIP : RegisterWithSubRegs<"eip", [IP]>, DwarfRegNum<[16, 8, 8]>;
+
  // X86-64 only
  def R8D : RegisterWithSubRegs<"r8d", [R8W]>, DwarfRegNum<[8, -2, -2]>;
  def R9D : RegisterWithSubRegs<"r9d", [R9W]>, DwarfRegNum<[9, -2, -2]>;
@@ -208,7 +208,7 @@ let Namespace = "X86" in {
  def ST4 : Register<"st(4)">, DwarfRegNum<[37, 16, 15]>;
  def ST5 : Register<"st(5)">, DwarfRegNum<[38, 17, 16]>;
  def ST6 : Register<"st(6)">, DwarfRegNum<[39, 18, 17]>;
-  def ST7 : Register<"st(7)">, DwarfRegNum<[40, 19, 18]>; 
+  def ST7 : Register<"st(7)">, DwarfRegNum<[40, 19, 18]>;
 
  // Status flags register
  def EFLAGS : Register<"flags">;
@@ -220,7 +220,7 @@ let Namespace = "X86" in {
  def ES : Register<"es">;
  def FS : Register<"fs">;
  def GS : Register<"gs">;
- 
+
  // Debug registers
  def DR0 : Register<"dr0">;
  def DR1 : Register<"dr1">;
@@ -230,7 +230,7 @@ let Namespace = "X86" in {
  def DR5 : Register<"dr5">;
  def DR6 : Register<"dr6">;
  def DR7 : Register<"dr7">;
- 
+
  // Control registers
  def CR0 : Register<"cr0">;
  def CR1 : Register<"cr1">;
@@ -261,10 +261,10 @@ let Namespace = "X86" in {
 // implicitly defined to be the register allocation order.
 //
 
-// List call-clobbered registers before callee-save registers. RBX, RBP, (and 
+// List call-clobbered registers before callee-save registers. RBX, RBP, (and
 // R12, R13, R14, and R15 for X86-64) are callee-save registers.
 // In 64-mode, there are 12 additional i8 registers, SIL, DIL, BPL, SPL, and
-// R8B, ... R15B. 
+// R8B, ... R15B.
 // Allocate R12 and R13 last, as these require an extra byte when
 // encoded in x86_64 instructions.
 // FIXME: Allow AH, CH, DH, BH to be used as general-purpose registers in
diff --git a/test/CodeGen/X86/tailcallstack64.ll b/test/CodeGen/X86/tailcallstack64.ll
index 107bdf9de3e..52b074def57 100644
--- a/test/CodeGen/X86/tailcallstack64.ll
+++ b/test/CodeGen/X86/tailcallstack64.ll
@@ -22,4 +22,3 @@ entry:
  %retval = tail call fastcc i32 @tailcallee(i32 %p1, i32 %p2, i32 %p3, i32 %p4,
                                             i32 %p5, i32 %p6, i32 %in2,i32 %tmp)
  ret i32 %retval
 }
-
diff --git a/utils/TableGen/CodeGenInstruction.cpp b/utils/TableGen/CodeGenInstruction.cpp
index 241105f9bb8..e6e081f2d6b 100644
--- a/utils/TableGen/CodeGenInstruction.cpp
+++ b/utils/TableGen/CodeGenInstruction.cpp
@@ -28,15 +28,15 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) {
  isPredicable = false;
  hasOptionalDef = false;
  isVariadic = false;
- 
+
  DagInit *OutDI = R->getValueAsDag("OutOperandList");
- 
+
  if (DefInit *Init = dynamic_cast<DefInit*>(OutDI->getOperator())) {
    if (Init->getDef()->getName() != "outs")
      throw R->getName() + ": invalid def name for output list: use 'outs'";
  } else
    throw R->getName() + ": invalid output list: use 'outs'";
- 
+
  NumDefs = OutDI->getNumArgs();
 
  DagInit *InDI = R->getValueAsDag("InOperandList");
@@ -45,7 +45,7 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) {
      throw R->getName() + ": invalid def name for input list: use 'ins'";
  } else
    throw R->getName() + ": invalid input list: use 'ins'";
- 
+
  unsigned MIOperandNo = 0;
  std::set<std::string> OperandNames;
  for (unsigned i = 0, e = InDI->getNumArgs()+OutDI->getNumArgs(); i != e; ++i){
@@ -58,11 +58,11 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) {
      ArgInit = InDI->getArg(i-NumDefs);
      ArgName = InDI->getArgName(i-NumDefs);
    }
- 
+
    DefInit *Arg = dynamic_cast<DefInit*>(ArgInit);
    if (!Arg)
      throw "Illegal operand for the '" + R->getName() + "' instruction!";
- 
+
    Record *Rec = Arg->getDef();
    std::string PrintMethod = "printOperand";
    std::string EncoderMethod;
@@ -73,19 +73,19 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) {
      // If there is an explicit encoder method, use it.
      EncoderMethod = Rec->getValueAsString("EncoderMethod");
      MIOpInfo = Rec->getValueAsDag("MIOperandInfo");
- 
+
      // Verify that MIOpInfo has an 'ops' root value.
      if (!dynamic_cast<DefInit*>(MIOpInfo->getOperator()) ||
          dynamic_cast<DefInit*>(MIOpInfo->getOperator())
          ->getDef()->getName() != "ops")
        throw "Bad value for MIOperandInfo in operand '" + Rec->getName() +
          "'\n";
- 
+
      // If we have MIOpInfo, then we have #operands equal to number of entries
      // in MIOperandInfo.
      if (unsigned NumArgs = MIOpInfo->getNumArgs())
        NumOps = NumArgs;
- 
+
      if (Rec->isSubClassOf("PredicateOperand"))
        isPredicable = true;
      else if (Rec->isSubClassOf("OptionalDefOperand"))
@@ -97,7 +97,7 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) {
             Rec->getName() != "ptr_rc" && Rec->getName() != "unknown")
      throw "Unknown operand class '" + Rec->getName() +
      "' in '" + R->getName() + "' instruction!";
- 
+
    // Check that the operand has a name and that it's unique.
    if (ArgName.empty())
      throw "In instruction '" + R->getName() + "', operand #" + utostr(i) +
@@ -105,13 +105,13 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) {
    if (!OperandNames.insert(ArgName).second)
      throw "In instruction '" + R->getName() + "', operand #" + utostr(i) +
        " has the same name as a previous operand!";
- 
+
    OperandList.push_back(OperandInfo(Rec, ArgName, PrintMethod, EncoderMethod,
                                      MIOperandNo, NumOps, MIOpInfo));
    MIOperandNo += NumOps;
  }
- 
- 
+
+
  // Make sure the constraints list for each operand is large enough to hold
  // constraint info, even if none is present.
  for (unsigned i = 0, e = OperandList.size(); i != e; ++i)
@@ -126,7 +126,7 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) {
 unsigned CGIOperandList::getOperandNamed(StringRef Name) const {
  unsigned OpIdx;
  if (hasOperandNamed(Name, OpIdx)) return OpIdx;
-  throw "'" + TheDef->getName() + "' does not have an operand named '$" + 
+  throw "'" + TheDef->getName() + "' does not have an operand named '$" +
    Name.str() + "'!";
 }
 
@@ -147,10 +147,10 @@ std::pair<unsigned,unsigned>
 CGIOperandList::ParseOperandName(const std::string &Op, bool AllowWholeOp) {
  if (Op.empty() || Op[0] != '$')
    throw TheDef->getName() + ": Illegal operand name: '" + Op + "'";
- 
+
  std::string OpName = Op.substr(1);
  std::string SubOpName;
- 
+
  // Check to see if this is $foo.bar.
  std::string::size_type DotIdx = OpName.find_first_of(".");
  if (DotIdx != std::string::npos) {
@@ -159,30 +159,30 @@ CGIOperandList::ParseOperandName(const std::string &Op, bool AllowWholeOp) {
      throw TheDef->getName() + ": illegal empty suboperand name in '"
            +Op +"'";
    OpName = OpName.substr(0, DotIdx);
  }
- 
+
  unsigned OpIdx = getOperandNamed(OpName);
- 
+
  if (SubOpName.empty()) {  // If no suboperand name was specified:
    // If one was needed, throw.
    if (OperandList[OpIdx].MINumOperands > 1 && !AllowWholeOp &&
        SubOpName.empty())
      throw TheDef->getName() + ": Illegal to refer to"
        " whole operand part of complex operand '" + Op + "'";
- 
+
    // Otherwise, return the operand.
    return std::make_pair(OpIdx, 0U);
  }
- 
+
  // Find the suboperand number involved.
  DagInit *MIOpInfo = OperandList[OpIdx].MIOperandInfo;
  if (MIOpInfo == 0)
    throw TheDef->getName() + ": unknown suboperand name in '" + Op + "'";
- 
+
  // Find the operand with the right name.
  for (unsigned i = 0, e = MIOpInfo->getNumArgs(); i != e; ++i)
    if (MIOpInfo->getArgName(i) == SubOpName)
      return std::make_pair(OpIdx, i);
- 
+
  // Otherwise, didn't find it!
  throw TheDef->getName() + ": unknown suboperand name in '" + Op + "'";
 }
@@ -199,7 +199,7 @@ static void ParseConstraint(const std::string &CStr, CGIOperandList &Ops) {
      throw "Illegal format for @earlyclobber constraint: '" + CStr + "'";
    Name = Name.substr(wpos);
    std::pair<unsigned,unsigned> Op = Ops.ParseOperandName(Name, false);
- 
+
    // Build the string for the operand
    if (!Ops[Op.first].Constraints[Op.second].isNone())
      throw "Operand '" + Name + "' cannot have multiple constraints!";
@@ -207,33 +207,33 @@ static void ParseConstraint(const std::string &CStr, CGIOperandList &Ops) {
      CGIOperandList::ConstraintInfo::getEarlyClobber();
    return;
  }
- 
+
  // Only other constraint is "TIED_TO" for now.
  std::string::size_type pos = CStr.find_first_of('=');
  assert(pos != std::string::npos && "Unrecognized constraint");
  start = CStr.find_first_not_of(" \t");
  std::string Name = CStr.substr(start, pos - start);
- 
+
  // TIED_TO: $src1 = $dst
  wpos = Name.find_first_of(" \t");
  if (wpos == std::string::npos)
    throw "Illegal format for tied-to constraint: '" + CStr + "'";
  std::string DestOpName = Name.substr(0, wpos);
  std::pair<unsigned,unsigned> DestOp = Ops.ParseOperandName(DestOpName, false);
- 
+
  Name = CStr.substr(pos+1);
  wpos = Name.find_first_not_of(" \t");
  if (wpos == std::string::npos)
    throw "Illegal format for tied-to constraint: '" + CStr + "'";
- 
+
  std::pair<unsigned,unsigned> SrcOp =
    Ops.ParseOperandName(Name.substr(wpos), false);
  if (SrcOp > DestOp)
    throw "Illegal tied-to operand constraint '" + CStr + "'";
- 
- 
+
+
  unsigned FlatOpNo = Ops.getFlattenedOperandNumber(SrcOp);
- 
+
  if (!Ops[DestOp.first].Constraints[DestOp.second].isNone())
    throw "Operand '" + DestOpName + "' cannot have multiple constraints!";
  Ops[DestOp.first].Constraints[DestOp.second] =
@@ -242,16 +242,16 @@ static void ParseConstraint(const std::string &CStr, CGIOperandList &Ops) {
 
 static void ParseConstraints(const std::string &CStr, CGIOperandList &Ops) {
  if (CStr.empty()) return;
- 
+
  const std::string delims(",");
  std::string::size_type bidx, eidx;
- 
+
  bidx = CStr.find_first_not_of(delims);
  while (bidx != std::string::npos) {
    eidx = CStr.find_first_of(delims, bidx);
    if (eidx == std::string::npos)
      eidx = CStr.length();
- 
+
    ParseConstraint(CStr.substr(bidx, eidx - bidx), Ops);
    bidx = CStr.find_first_not_of(delims, eidx);
  }
@@ -262,16 +262,16 @@ void CGIOperandList::ProcessDisableEncoding(std::string DisableEncoding) {
    std::string OpName;
    tie(OpName, DisableEncoding) = getToken(DisableEncoding, " ,\t");
    if (OpName.empty()) break;
- 
+
    // Figure out which operand this is.
    std::pair<unsigned,unsigned> Op = ParseOperandName(OpName, false);
- 
+
    // Mark the operand as not-to-be encoded.
    if (Op.second >= OperandList[Op.first].DoNotEncode.size())
      OperandList[Op.first].DoNotEncode.resize(Op.second+1);
    OperandList[Op.first].DoNotEncode[Op.second] = true;
  }
- 
+
 }
 
 //===----------------------------------------------------------------------===//
@@ -325,11 +325,11 @@ CodeGenInstruction::CodeGenInstruction(Record *R) : TheDef(R), Operands(R) {
 MVT::SimpleValueType CodeGenInstruction::
 HasOneImplicitDefWithKnownVT(const CodeGenTarget &TargetInfo) const {
  if (ImplicitDefs.empty()) return MVT::Other;
- 
+
  // Check to see if the first implicit def has a resolvable type.
  Record *FirstImplicitDef = ImplicitDefs[0];
  assert(FirstImplicitDef->isSubClassOf("Register"));
-  const std::vector<MVT::SimpleValueType> &RegVTs = 
+  const std::vector<MVT::SimpleValueType> &RegVTs =
    TargetInfo.getRegisterVTs(FirstImplicitDef);
  if (RegVTs.size() == 1)
    return RegVTs[0];
@@ -342,7 +342,7 @@ HasOneImplicitDefWithKnownVT(const CodeGenTarget &TargetInfo) const {
 std::string CodeGenInstruction::
 FlattenAsmStringVariants(StringRef Cur, unsigned Variant) {
  std::string Res = "";
- 
+
  for (;;) {
    // Find the start of the next variant string.
    size_t VariantsStart = 0;
@@ -351,14 +351,14 @@ FlattenAsmStringVariants(StringRef Cur, unsigned Variant) {
          (VariantsStart == 0 || (Cur[VariantsStart-1] != '$' &&
                                  Cur[VariantsStart-1] != '\\')))
        break;
- 
+
    // Add the prefix to the result.
    Res += Cur.slice(0, VariantsStart);
    if (VariantsStart == Cur.size())
      break;
- 
+
    ++VariantsStart; // Skip the '{'.
- 
+
    // Scan to the end of the variants string.
    size_t VariantsEnd = VariantsStart;
    unsigned NestedBraces = 1;
@@ -369,18 +369,18 @@ FlattenAsmStringVariants(StringRef Cur, unsigned Variant) {
      } else if (Cur[VariantsEnd] == '{')
        ++NestedBraces;
    }
- 
+
    // Select the Nth variant (or empty).
    StringRef Selection = Cur.slice(VariantsStart, VariantsEnd);
    for (unsigned i = 0; i != Variant; ++i)
      Selection = Selection.split('|').second;
    Res += Selection.split('|').first;
- 
+
    assert(VariantsEnd != Cur.size() &&
           "Unterminated variants in assembly string!");
    Cur = Cur.substr(VariantsEnd + 1);
  }
- 
+
  return Res;
 }
 
@@ -399,7 +399,7 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
    throw TGError(R->getLoc(), "result of inst alias should be an instruction");
 
  ResultInst = &T.getInstruction(DI->getDef());
- 
+
  // NameClass - If argument names are repeated, we need to verify they have
  // the same class.
  StringMap<Record*> NameClass;
@@ -417,7 +417,7 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
                    ADI->getDef()->getName() + "!");
    Entry = ADI->getDef();
  }
- 
+
  // Decode and validate the arguments of the result.
  unsigned AliasOpNo = 0;
  for (unsigned i = 0, e = ResultInst->Operands.size(); i != e; ++i) {
@@ -430,8 +430,8 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
                    " arguments, but " + ResultInst->TheDef->getName() +
                    " instruction expects " + utostr(ResultInst->Operands.size()) +
                    " operands!");
- 
- 
+
+
    Init *Arg = Result->getArg(AliasOpNo);
    Record *ResultOpRec = ResultInst->Operands[i].Rec;
@@ -441,16 +441,16 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
      if (!Result->getArgName(AliasOpNo).empty())
        throw TGError(R->getLoc(), "result fixed register argument must "
                      "not have a name!");
- 
+
      if (!ResultOpRec->isSubClassOf("RegisterClass"))
        throw TGError(R->getLoc(), "result fixed register argument is not "
                      "passed to a RegisterClass operand!");
- 
+
      if (!T.getRegisterClass(ResultOpRec).containsRegister(ADI->getDef()))
        throw TGError(R->getLoc(), "fixed register " +ADI->getDef()->getName() +
                      " is not a member of the " + ResultOpRec->getName() +
                      " register class!");
- 
+
      // Now that it is validated, add it.
      ResultOperands.push_back(ResultOperand(ADI->getDef()));
      ResultInstOperandIndex.push_back(i);
@@ -474,7 +474,7 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
      continue;
    }
  }
- 
+
  // If the operand is a record, it must have a name, and the record type must
  // match up with the instruction's argument type.
  if (DefInit *ADI = dynamic_cast<DefInit*>(Arg)) {
@@ -485,9 +485,9 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
    if (ADI->getDef() != ResultOpRec)
      throw TGError(R->getLoc(), "result argument #" + utostr(AliasOpNo) +
                    " declared with class " + ADI->getDef()->getName() +
-                    ", instruction operand is class " + 
+                    ", instruction operand is class " +
                    ResultOpRec->getName());
- 
+
    // Now that it is validated, add it.
    ResultOperands.push_back(ResultOperand(Result->getArgName(AliasOpNo),
                                           ADI->getDef()));
@@ -495,7 +495,7 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
    ResultInstOperandIndex.push_back(i);
    ++AliasOpNo;
    continue;
  }
- 
+
  if (IntInit *II = dynamic_cast<IntInit*>(Arg)) {
    // Integer arguments can't have names.
    if (!Result->getArgName(AliasOpNo).empty())
@@ -503,7 +503,7 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
                    " must not have a name!");
    if (ResultInst->Operands[i].MINumOperands != 1 ||
        !ResultOpRec->isSubClassOf("Operand"))
-      throw TGError(R->getLoc(), "invalid argument class " + 
+      throw TGError(R->getLoc(), "invalid argument class " +
                    ResultOpRec->getName() + " for integer result operand!");
    ResultOperands.push_back(ResultOperand(II->getValue()));
@@ -514,7 +514,7 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) {
      throw TGError(R->getLoc(),
                    "result of inst alias has unknown operand type");
  }
- 
+
  if (AliasOpNo != Result->getNumArgs())
    throw TGError(R->getLoc(), "result has " + utostr(Result->getNumArgs()) +
                  " arguments, but " + ResultInst->TheDef->getName() +