[mips] Resolve register classes dynamically using ptr_rc to reduce the number
of load/store instructions defined.

Previously, we were defining load/store instructions for each pointer size
(32 and 64-bit), but now we need just one definition.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@188830 91177308-0d34-0410-b5e6-96231b3b80d8
Akira Hatanaka 2013-08-20 21:08:22 +00:00
parent 0323d4b169
commit a98a486ad1
10 changed files with 182 additions and 369 deletions
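For context while reading the diff: marking an operand's register field as ptr_rc in TableGen leaves its register class unresolved in the instruction description, and generic CodeGen code later asks the target for the concrete pointer register class. The sketch below is illustrative only and not part of the patch; resolveOperandRegClass is a made-up name, the signatures are approximate for LLVM of this era, and the real logic lives in the generic TargetInstrInfo/TargetRegisterInfo code together with the MipsRegisterInfo::getPointerRegClass override added near the end of this commit.

#include "llvm/MC/MCInstrDesc.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

// Illustrative sketch: roughly how an operand declared with ptr_rc receives a
// concrete register class once the subtarget/ABI is known.
static const TargetRegisterClass *
resolveOperandRegClass(const MCInstrDesc &Desc, unsigned OpNo,
                       const TargetRegisterInfo &TRI,
                       const MachineFunction &MF) {
  const MCOperandInfo &OpI = Desc.OpInfo[OpNo];
  if (OpI.isLookupPtrRegClass())          // operand was declared as ptr_rc
    return TRI.getPointerRegClass(MF);    // target decides: GPR32 vs. GPR64
  if (OpI.RegClass < 0)                   // immediate or other non-register
    return 0;
  return TRI.getRegClass(OpI.RegClass);   // fixed class baked into the .td
}

On Mips the hook returns GPR64 for the N64 ABI and GPR32 otherwise, which is what lets a single lw/sw/ld/sd definition cover both pointer widths.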

View File

@ -86,14 +86,14 @@ let DecoderNamespace = "MicroMips", Predicates = [InMicroMips] in {
SRLV_FM_MM<0xd0, 0>;
/// Load and Store Instructions - aligned
defm LB_MM : LoadM<"lb", GPR32Opnd, sextloadi8>, MMRel, LW_FM_MM<0x7>;
defm LBu_MM : LoadM<"lbu", GPR32Opnd, zextloadi8>, MMRel, LW_FM_MM<0x5>;
defm LH_MM : LoadM<"lh", GPR32Opnd, sextloadi16>, MMRel, LW_FM_MM<0xf>;
defm LHu_MM : LoadM<"lhu", GPR32Opnd, zextloadi16>, MMRel, LW_FM_MM<0xd>;
defm LW_MM : LoadM<"lw", GPR32Opnd>, MMRel, LW_FM_MM<0x3f>;
defm SB_MM : StoreM<"sb", GPR32Opnd, truncstorei8>, MMRel, LW_FM_MM<0x6>;
defm SH_MM : StoreM<"sh", GPR32Opnd, truncstorei16>, MMRel, LW_FM_MM<0xe>;
defm SW_MM : StoreM<"sw", GPR32Opnd>, MMRel, LW_FM_MM<0x3e>;
def LB_MM : Load<"lb", GPR32Opnd, sextloadi8>, MMRel, LW_FM_MM<0x7>;
def LBu_MM : Load<"lbu", GPR32Opnd, zextloadi8>, MMRel, LW_FM_MM<0x5>;
def LH_MM : Load<"lh", GPR32Opnd, sextloadi16>, MMRel, LW_FM_MM<0xf>;
def LHu_MM : Load<"lhu", GPR32Opnd, zextloadi16>, MMRel, LW_FM_MM<0xd>;
def LW_MM : Load<"lw", GPR32Opnd>, MMRel, LW_FM_MM<0x3f>;
def SB_MM : Store<"sb", GPR32Opnd, truncstorei8>, MMRel, LW_FM_MM<0x6>;
def SH_MM : Store<"sh", GPR32Opnd, truncstorei16>, MMRel, LW_FM_MM<0xe>;
def SW_MM : Store<"sw", GPR32Opnd>, MMRel, LW_FM_MM<0x3e>;
/// Load and Store Instructions - unaligned
def LWL_MM : LoadLeftRightMM<"lwl", MipsLWL, GPR32Opnd, mem_mm_12>,

View File

@ -34,36 +34,21 @@ def immZExt6 : ImmLeaf<i32, [{return Imm == (Imm & 0x3f);}]>;
//===----------------------------------------------------------------------===//
// Instructions specific format
//===----------------------------------------------------------------------===//
let DecoderNamespace = "Mips64" in {
multiclass Atomic2Ops64<PatFrag Op> {
def NAME : Atomic2Ops<Op, GPR64, GPR32>, Requires<[NotN64, HasStdEnc]>;
def _P8 : Atomic2Ops<Op, GPR64, GPR64>, Requires<[IsN64, HasStdEnc]>;
}
multiclass AtomicCmpSwap64<PatFrag Op> {
def NAME : AtomicCmpSwap<Op, GPR64, GPR32>,
Requires<[NotN64, HasStdEnc]>;
def _P8 : AtomicCmpSwap<Op, GPR64, GPR64>,
Requires<[IsN64, HasStdEnc]>;
}
}
let usesCustomInserter = 1, Predicates = [HasStdEnc],
DecoderNamespace = "Mips64" in {
defm ATOMIC_LOAD_ADD_I64 : Atomic2Ops64<atomic_load_add_64>;
defm ATOMIC_LOAD_SUB_I64 : Atomic2Ops64<atomic_load_sub_64>;
defm ATOMIC_LOAD_AND_I64 : Atomic2Ops64<atomic_load_and_64>;
defm ATOMIC_LOAD_OR_I64 : Atomic2Ops64<atomic_load_or_64>;
defm ATOMIC_LOAD_XOR_I64 : Atomic2Ops64<atomic_load_xor_64>;
defm ATOMIC_LOAD_NAND_I64 : Atomic2Ops64<atomic_load_nand_64>;
defm ATOMIC_SWAP_I64 : Atomic2Ops64<atomic_swap_64>;
defm ATOMIC_CMP_SWAP_I64 : AtomicCmpSwap64<atomic_cmp_swap_64>;
let usesCustomInserter = 1 in {
def ATOMIC_LOAD_ADD_I64 : Atomic2Ops<atomic_load_add_64, GPR64>;
def ATOMIC_LOAD_SUB_I64 : Atomic2Ops<atomic_load_sub_64, GPR64>;
def ATOMIC_LOAD_AND_I64 : Atomic2Ops<atomic_load_and_64, GPR64>;
def ATOMIC_LOAD_OR_I64 : Atomic2Ops<atomic_load_or_64, GPR64>;
def ATOMIC_LOAD_XOR_I64 : Atomic2Ops<atomic_load_xor_64, GPR64>;
def ATOMIC_LOAD_NAND_I64 : Atomic2Ops<atomic_load_nand_64, GPR64>;
def ATOMIC_SWAP_I64 : Atomic2Ops<atomic_swap_64, GPR64>;
def ATOMIC_CMP_SWAP_I64 : AtomicCmpSwap<atomic_cmp_swap_64, GPR64>;
}
/// Pseudo instructions for loading and storing accumulator registers.
let isPseudo = 1, isCodeGenOnly = 1 in {
defm LOAD_ACC128 : LoadM<"", ACC128>;
defm STORE_ACC128 : StoreM<"", ACC128>;
def LOAD_ACC128 : Load<"", ACC128>;
def STORE_ACC128 : Store<"", ACC128>;
}
//===----------------------------------------------------------------------===//
@ -134,43 +119,36 @@ let Predicates = [HasMips64r2, HasStdEnc] in {
/// Load and Store Instructions
/// aligned
let isCodeGenOnly = 1 in {
defm LB64 : LoadM<"lb", GPR64Opnd, sextloadi8, IILoad>, LW_FM<0x20>;
defm LBu64 : LoadM<"lbu", GPR64Opnd, zextloadi8, IILoad>, LW_FM<0x24>;
defm LH64 : LoadM<"lh", GPR64Opnd, sextloadi16, IILoad>, LW_FM<0x21>;
defm LHu64 : LoadM<"lhu", GPR64Opnd, zextloadi16, IILoad>, LW_FM<0x25>;
defm LW64 : LoadM<"lw", GPR64Opnd, sextloadi32, IILoad>, LW_FM<0x23>;
defm SB64 : StoreM<"sb", GPR64Opnd, truncstorei8, IIStore>, LW_FM<0x28>;
defm SH64 : StoreM<"sh", GPR64Opnd, truncstorei16, IIStore>, LW_FM<0x29>;
defm SW64 : StoreM<"sw", GPR64Opnd, truncstorei32, IIStore>, LW_FM<0x2b>;
def LB64 : Load<"lb", GPR64Opnd, sextloadi8, IILoad>, LW_FM<0x20>;
def LBu64 : Load<"lbu", GPR64Opnd, zextloadi8, IILoad>, LW_FM<0x24>;
def LH64 : Load<"lh", GPR64Opnd, sextloadi16, IILoad>, LW_FM<0x21>;
def LHu64 : Load<"lhu", GPR64Opnd, zextloadi16, IILoad>, LW_FM<0x25>;
def LW64 : Load<"lw", GPR64Opnd, sextloadi32, IILoad>, LW_FM<0x23>;
def SB64 : Store<"sb", GPR64Opnd, truncstorei8, IIStore>, LW_FM<0x28>;
def SH64 : Store<"sh", GPR64Opnd, truncstorei16, IIStore>, LW_FM<0x29>;
def SW64 : Store<"sw", GPR64Opnd, truncstorei32, IIStore>, LW_FM<0x2b>;
}
defm LWu : LoadM<"lwu", GPR64Opnd, zextloadi32, IILoad>, LW_FM<0x27>;
defm LD : LoadM<"ld", GPR64Opnd, load, IILoad>, LW_FM<0x37>;
defm SD : StoreM<"sd", GPR64Opnd, store, IIStore>, LW_FM<0x3f>;
def LWu : Load<"lwu", GPR64Opnd, zextloadi32, IILoad>, LW_FM<0x27>;
def LD : Load<"ld", GPR64Opnd, load, IILoad>, LW_FM<0x37>;
def SD : Store<"sd", GPR64Opnd, store, IIStore>, LW_FM<0x3f>;
/// load/store left/right
let isCodeGenOnly = 1 in {
defm LWL64 : LoadLeftRightM<"lwl", MipsLWL, GPR64Opnd>, LW_FM<0x22>;
defm LWR64 : LoadLeftRightM<"lwr", MipsLWR, GPR64Opnd>, LW_FM<0x26>;
defm SWL64 : StoreLeftRightM<"swl", MipsSWL, GPR64Opnd>, LW_FM<0x2a>;
defm SWR64 : StoreLeftRightM<"swr", MipsSWR, GPR64Opnd>, LW_FM<0x2e>;
def LWL64 : LoadLeftRight<"lwl", MipsLWL, GPR64Opnd>, LW_FM<0x22>;
def LWR64 : LoadLeftRight<"lwr", MipsLWR, GPR64Opnd>, LW_FM<0x26>;
def SWL64 : StoreLeftRight<"swl", MipsSWL, GPR64Opnd>, LW_FM<0x2a>;
def SWR64 : StoreLeftRight<"swr", MipsSWR, GPR64Opnd>, LW_FM<0x2e>;
}
defm LDL : LoadLeftRightM<"ldl", MipsLDL, GPR64Opnd>, LW_FM<0x1a>;
defm LDR : LoadLeftRightM<"ldr", MipsLDR, GPR64Opnd>, LW_FM<0x1b>;
defm SDL : StoreLeftRightM<"sdl", MipsSDL, GPR64Opnd>, LW_FM<0x2c>;
defm SDR : StoreLeftRightM<"sdr", MipsSDR, GPR64Opnd>, LW_FM<0x2d>;
def LDL : LoadLeftRight<"ldl", MipsLDL, GPR64Opnd>, LW_FM<0x1a>;
def LDR : LoadLeftRight<"ldr", MipsLDR, GPR64Opnd>, LW_FM<0x1b>;
def SDL : StoreLeftRight<"sdl", MipsSDL, GPR64Opnd>, LW_FM<0x2c>;
def SDR : StoreLeftRight<"sdr", MipsSDR, GPR64Opnd>, LW_FM<0x2d>;
/// Load-linked, Store-conditional
let Predicates = [NotN64, HasStdEnc] in {
def LLD : LLBase<"lld", GPR64Opnd, mem>, LW_FM<0x34>;
def SCD : SCBase<"scd", GPR64Opnd, mem>, LW_FM<0x3c>;
}
let Predicates = [IsN64, HasStdEnc], isCodeGenOnly = 1 in {
def LLD_P8 : LLBase<"lld", GPR64Opnd, mem64>, LW_FM<0x34>;
def SCD_P8 : SCBase<"scd", GPR64Opnd, mem64>, LW_FM<0x3c>;
}
def LLD : LLBase<"lld", GPR64Opnd>, LW_FM<0x34>;
def SCD : SCBase<"scd", GPR64Opnd>, LW_FM<0x3c>;
/// Jump and Branch Instructions
let isCodeGenOnly = 1 in {
@ -221,7 +199,7 @@ def DCLO : CountLeading1<"dclo", GPR64Opnd>, CLO_FM<0x25>;
def DSBH : SubwordSwap<"dsbh", GPR64Opnd>, SEB_FM<2, 0x24>;
def DSHD : SubwordSwap<"dshd", GPR64Opnd>, SEB_FM<5, 0x24>;
def LEA_ADDiu64 : EffectiveAddress<"daddiu", GPR64Opnd, mem_ea_64>, LW_FM<0x19>;
def LEA_ADDiu64 : EffectiveAddress<"daddiu", GPR64Opnd>, LW_FM<0x19>;
let isCodeGenOnly = 1 in
def RDHWR64 : ReadHardware<GPR64Opnd, HWRegsOpnd>, RDHWR_FM;
@ -251,18 +229,12 @@ let isCodeGenOnly = 1, rs = 0, shamt = 0 in {
//===----------------------------------------------------------------------===//
// extended loads
let Predicates = [NotN64, HasStdEnc] in {
let Predicates = [HasStdEnc] in {
def : MipsPat<(i64 (extloadi1 addr:$src)), (LB64 addr:$src)>;
def : MipsPat<(i64 (extloadi8 addr:$src)), (LB64 addr:$src)>;
def : MipsPat<(i64 (extloadi16 addr:$src)), (LH64 addr:$src)>;
def : MipsPat<(i64 (extloadi32 addr:$src)), (LW64 addr:$src)>;
}
let Predicates = [IsN64, HasStdEnc] in {
def : MipsPat<(i64 (extloadi1 addr:$src)), (LB64_P8 addr:$src)>;
def : MipsPat<(i64 (extloadi8 addr:$src)), (LB64_P8 addr:$src)>;
def : MipsPat<(i64 (extloadi16 addr:$src)), (LH64_P8 addr:$src)>;
def : MipsPat<(i64 (extloadi32 addr:$src)), (LW64_P8 addr:$src)>;
}
// hi/lo relocs
def : MipsPat<(MipsHi tglobaladdr:$in), (LUi64 tglobaladdr:$in)>;

View File

@ -1242,12 +1242,12 @@ def PREPEND : PREPEND_ENC, PREPEND_DESC;
// Pseudos.
let isPseudo = 1, isCodeGenOnly = 1 in {
// Pseudo instructions for loading and storing accumulator registers.
defm LOAD_ACC64DSP : LoadM<"", ACC64DSPOpnd>;
defm STORE_ACC64DSP : StoreM<"", ACC64DSPOpnd>;
def LOAD_ACC64DSP : Load<"", ACC64DSPOpnd>;
def STORE_ACC64DSP : Store<"", ACC64DSPOpnd>;
// Pseudos for loading and storing ccond field of DSP control register.
defm LOAD_CCOND_DSP : LoadM<"load_ccond_dsp", DSPCC>;
defm STORE_CCOND_DSP : StoreM<"store_ccond_dsp", DSPCC>;
def LOAD_CCOND_DSP : Load<"load_ccond_dsp", DSPCC>;
def STORE_CCOND_DSP : Store<"store_ccond_dsp", DSPCC>;
}
// Pseudo CMP and PICK instructions.

View File

@ -797,107 +797,75 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
default:
llvm_unreachable("Unexpected instr type to insert");
case Mips::ATOMIC_LOAD_ADD_I8:
case Mips::ATOMIC_LOAD_ADD_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, Mips::ADDu);
case Mips::ATOMIC_LOAD_ADD_I16:
case Mips::ATOMIC_LOAD_ADD_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, Mips::ADDu);
case Mips::ATOMIC_LOAD_ADD_I32:
case Mips::ATOMIC_LOAD_ADD_I32_P8:
return emitAtomicBinary(MI, BB, 4, Mips::ADDu);
case Mips::ATOMIC_LOAD_ADD_I64:
case Mips::ATOMIC_LOAD_ADD_I64_P8:
return emitAtomicBinary(MI, BB, 8, Mips::DADDu);
case Mips::ATOMIC_LOAD_AND_I8:
case Mips::ATOMIC_LOAD_AND_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, Mips::AND);
case Mips::ATOMIC_LOAD_AND_I16:
case Mips::ATOMIC_LOAD_AND_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, Mips::AND);
case Mips::ATOMIC_LOAD_AND_I32:
case Mips::ATOMIC_LOAD_AND_I32_P8:
return emitAtomicBinary(MI, BB, 4, Mips::AND);
case Mips::ATOMIC_LOAD_AND_I64:
case Mips::ATOMIC_LOAD_AND_I64_P8:
return emitAtomicBinary(MI, BB, 8, Mips::AND64);
case Mips::ATOMIC_LOAD_OR_I8:
case Mips::ATOMIC_LOAD_OR_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, Mips::OR);
case Mips::ATOMIC_LOAD_OR_I16:
case Mips::ATOMIC_LOAD_OR_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, Mips::OR);
case Mips::ATOMIC_LOAD_OR_I32:
case Mips::ATOMIC_LOAD_OR_I32_P8:
return emitAtomicBinary(MI, BB, 4, Mips::OR);
case Mips::ATOMIC_LOAD_OR_I64:
case Mips::ATOMIC_LOAD_OR_I64_P8:
return emitAtomicBinary(MI, BB, 8, Mips::OR64);
case Mips::ATOMIC_LOAD_XOR_I8:
case Mips::ATOMIC_LOAD_XOR_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, Mips::XOR);
case Mips::ATOMIC_LOAD_XOR_I16:
case Mips::ATOMIC_LOAD_XOR_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, Mips::XOR);
case Mips::ATOMIC_LOAD_XOR_I32:
case Mips::ATOMIC_LOAD_XOR_I32_P8:
return emitAtomicBinary(MI, BB, 4, Mips::XOR);
case Mips::ATOMIC_LOAD_XOR_I64:
case Mips::ATOMIC_LOAD_XOR_I64_P8:
return emitAtomicBinary(MI, BB, 8, Mips::XOR64);
case Mips::ATOMIC_LOAD_NAND_I8:
case Mips::ATOMIC_LOAD_NAND_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, 0, true);
case Mips::ATOMIC_LOAD_NAND_I16:
case Mips::ATOMIC_LOAD_NAND_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, 0, true);
case Mips::ATOMIC_LOAD_NAND_I32:
case Mips::ATOMIC_LOAD_NAND_I32_P8:
return emitAtomicBinary(MI, BB, 4, 0, true);
case Mips::ATOMIC_LOAD_NAND_I64:
case Mips::ATOMIC_LOAD_NAND_I64_P8:
return emitAtomicBinary(MI, BB, 8, 0, true);
case Mips::ATOMIC_LOAD_SUB_I8:
case Mips::ATOMIC_LOAD_SUB_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, Mips::SUBu);
case Mips::ATOMIC_LOAD_SUB_I16:
case Mips::ATOMIC_LOAD_SUB_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, Mips::SUBu);
case Mips::ATOMIC_LOAD_SUB_I32:
case Mips::ATOMIC_LOAD_SUB_I32_P8:
return emitAtomicBinary(MI, BB, 4, Mips::SUBu);
case Mips::ATOMIC_LOAD_SUB_I64:
case Mips::ATOMIC_LOAD_SUB_I64_P8:
return emitAtomicBinary(MI, BB, 8, Mips::DSUBu);
case Mips::ATOMIC_SWAP_I8:
case Mips::ATOMIC_SWAP_I8_P8:
return emitAtomicBinaryPartword(MI, BB, 1, 0);
case Mips::ATOMIC_SWAP_I16:
case Mips::ATOMIC_SWAP_I16_P8:
return emitAtomicBinaryPartword(MI, BB, 2, 0);
case Mips::ATOMIC_SWAP_I32:
case Mips::ATOMIC_SWAP_I32_P8:
return emitAtomicBinary(MI, BB, 4, 0);
case Mips::ATOMIC_SWAP_I64:
case Mips::ATOMIC_SWAP_I64_P8:
return emitAtomicBinary(MI, BB, 8, 0);
case Mips::ATOMIC_CMP_SWAP_I8:
case Mips::ATOMIC_CMP_SWAP_I8_P8:
return emitAtomicCmpSwapPartword(MI, BB, 1);
case Mips::ATOMIC_CMP_SWAP_I16:
case Mips::ATOMIC_CMP_SWAP_I16_P8:
return emitAtomicCmpSwapPartword(MI, BB, 2);
case Mips::ATOMIC_CMP_SWAP_I32:
case Mips::ATOMIC_CMP_SWAP_I32_P8:
return emitAtomicCmpSwap(MI, BB, 4);
case Mips::ATOMIC_CMP_SWAP_I64:
case Mips::ATOMIC_CMP_SWAP_I64_P8:
return emitAtomicCmpSwap(MI, BB, 8);
case Mips::PseudoSDIV:
case Mips::PseudoUDIV:
@ -924,16 +892,16 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
unsigned LL, SC, AND, NOR, ZERO, BEQ;
if (Size == 4) {
LL = IsN64 ? Mips::LL_P8 : Mips::LL;
SC = IsN64 ? Mips::SC_P8 : Mips::SC;
LL = Mips::LL;
SC = Mips::SC;
AND = Mips::AND;
NOR = Mips::NOR;
ZERO = Mips::ZERO;
BEQ = Mips::BEQ;
}
else {
LL = IsN64 ? Mips::LLD_P8 : Mips::LLD;
SC = IsN64 ? Mips::SCD_P8 : Mips::SCD;
LL = Mips::LLD;
SC = Mips::SCD;
AND = Mips::AND64;
NOR = Mips::NOR64;
ZERO = Mips::ZERO_64;
@ -1009,8 +977,6 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned LL = IsN64 ? Mips::LL_P8 : Mips::LL;
unsigned SC = IsN64 ? Mips::SC_P8 : Mips::SC;
unsigned Dest = MI->getOperand(0).getReg();
unsigned Ptr = MI->getOperand(1).getReg();
@ -1107,7 +1073,7 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
// beq success,$0,loopMBB
BB = loopMBB;
BuildMI(BB, DL, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::LL), OldVal).addReg(AlignedAddr).addImm(0);
if (Nand) {
// and andres, oldval, incr2
// nor binopres, $0, andres
@ -1130,7 +1096,7 @@ MipsTargetLowering::emitAtomicBinaryPartword(MachineInstr *MI,
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, DL, TII->get(Mips::OR), StoreVal)
.addReg(MaskedOldVal0).addReg(NewVal);
BuildMI(BB, DL, TII->get(SC), Success)
BuildMI(BB, DL, TII->get(Mips::SC), Success)
.addReg(StoreVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::BEQ))
.addReg(Success).addReg(Mips::ZERO).addMBB(loopMBB);
@ -1171,15 +1137,15 @@ MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
unsigned LL, SC, ZERO, BNE, BEQ;
if (Size == 4) {
LL = IsN64 ? Mips::LL_P8 : Mips::LL;
SC = IsN64 ? Mips::SC_P8 : Mips::SC;
LL = Mips::LL;
SC = Mips::SC;
ZERO = Mips::ZERO;
BNE = Mips::BNE;
BEQ = Mips::BEQ;
}
else {
LL = IsN64 ? Mips::LLD_P8 : Mips::LLD;
SC = IsN64 ? Mips::SCD_P8 : Mips::SCD;
LL = Mips::LLD;
SC = Mips::SCD;
ZERO = Mips::ZERO_64;
BNE = Mips::BNE64;
BEQ = Mips::BEQ64;
@ -1251,8 +1217,6 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned LL = IsN64 ? Mips::LL_P8 : Mips::LL;
unsigned SC = IsN64 ? Mips::SC_P8 : Mips::SC;
unsigned Dest = MI->getOperand(0).getReg();
unsigned Ptr = MI->getOperand(1).getReg();
@ -1349,7 +1313,7 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
// and maskedoldval0,oldval,mask
// bne maskedoldval0,shiftedcmpval,sinkMBB
BB = loop1MBB;
BuildMI(BB, DL, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::LL), OldVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::AND), MaskedOldVal0)
.addReg(OldVal).addReg(Mask);
BuildMI(BB, DL, TII->get(Mips::BNE))
@ -1365,7 +1329,7 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, DL, TII->get(Mips::OR), StoreVal)
.addReg(MaskedOldVal1).addReg(ShiftedNewVal);
BuildMI(BB, DL, TII->get(SC), Success)
BuildMI(BB, DL, TII->get(Mips::SC), Success)
.addReg(StoreVal).addReg(AlignedAddr).addImm(0);
BuildMI(BB, DL, TII->get(Mips::BEQ))
.addReg(Success).addReg(Mips::ZERO).addMBB(loop1MBB);

View File

@ -143,16 +143,16 @@ class MTC1_FT<string opstr, RegisterOperand DstRC, RegisterOperand SrcRC,
[(set DstRC:$fs, (OpNode SrcRC:$rt))], Itin, FrmFR>;
class LW_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
Operand MemOpnd, SDPatternOperator OpNode= null_frag> :
InstSE<(outs RC:$rt), (ins MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"),
SDPatternOperator OpNode= null_frag> :
InstSE<(outs RC:$rt), (ins mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(set RC:$rt, (OpNode addrDefault:$addr))], Itin, FrmFI> {
let DecoderMethod = "DecodeFMem";
let mayLoad = 1;
}
class SW_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
Operand MemOpnd, SDPatternOperator OpNode= null_frag> :
InstSE<(outs), (ins RC:$rt, MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"),
SDPatternOperator OpNode= null_frag> :
InstSE<(outs), (ins RC:$rt, mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(OpNode RC:$rt, addrDefault:$addr)], Itin, FrmFI> {
let DecoderMethod = "DecodeFMem";
let mayStore = 1;
@ -353,39 +353,23 @@ def FMOV_D64 : ABSS_FT<"mov.d", FGR64Opnd, FGR64Opnd, IIFmove>,
}
/// Floating Point Memory Instructions
let Predicates = [IsN64, HasStdEnc], DecoderNamespace = "Mips64" in {
def LWC1_P8 : LW_FT<"lwc1", FGR32Opnd, IIFLoad, mem64, load>,
LW_FM<0x31>;
def SWC1_P8 : SW_FT<"swc1", FGR32Opnd, IIFStore, mem64, store>,
LW_FM<0x39>;
def LDC164_P8 : LW_FT<"ldc1", FGR64Opnd, IIFLoad, mem64, load>,
LW_FM<0x35> {
let isCodeGenOnly =1;
}
def SDC164_P8 : SW_FT<"sdc1", FGR64Opnd, IIFStore, mem64, store>,
LW_FM<0x3d> {
let isCodeGenOnly =1;
}
let Predicates = [HasStdEnc] in {
def LWC1 : LW_FT<"lwc1", FGR32Opnd, IIFLoad, load>, LW_FM<0x31>;
def SWC1 : SW_FT<"swc1", FGR32Opnd, IIFStore, store>, LW_FM<0x39>;
}
let Predicates = [NotN64, HasStdEnc] in {
def LWC1 : LW_FT<"lwc1", FGR32Opnd, IIFLoad, mem, load>, LW_FM<0x31>;
def SWC1 : SW_FT<"swc1", FGR32Opnd, IIFStore, mem, store>, LW_FM<0x39>;
let Predicates = [HasMips64, HasStdEnc], DecoderNamespace = "Mips64" in {
def LDC164 : LW_FT<"ldc1", FGR64Opnd, IIFLoad, load>, LW_FM<0x35>;
def SDC164 : SW_FT<"sdc1", FGR64Opnd, IIFStore, store>, LW_FM<0x3d>;
}
let Predicates = [NotN64, HasMips64, HasStdEnc],
DecoderNamespace = "Mips64" in {
def LDC164 : LW_FT<"ldc1", FGR64Opnd, IIFLoad, mem, load>, LW_FM<0x35>;
def SDC164 : SW_FT<"sdc1", FGR64Opnd, IIFStore, mem, store>, LW_FM<0x3d>;
}
let Predicates = [NotN64, NotMips64, HasStdEnc] in {
let Predicates = [NotMips64, HasStdEnc] in {
let isPseudo = 1, isCodeGenOnly = 1 in {
def PseudoLDC1 : LW_FT<"", AFGR64Opnd, IIFLoad, mem, load>;
def PseudoSDC1 : SW_FT<"", AFGR64Opnd, IIFStore, mem, store>;
def PseudoLDC1 : LW_FT<"", AFGR64Opnd, IIFLoad, load>;
def PseudoSDC1 : SW_FT<"", AFGR64Opnd, IIFStore, store>;
}
def LDC1 : LW_FT<"ldc1", AFGR64Opnd, IIFLoad, mem>, LW_FM<0x35>;
def SDC1 : SW_FT<"sdc1", AFGR64Opnd, IIFStore, mem>, LW_FM<0x3d>;
def LDC1 : LW_FT<"ldc1", AFGR64Opnd, IIFLoad>, LW_FM<0x35>;
def SDC1 : SW_FT<"sdc1", AFGR64Opnd, IIFStore>, LW_FM<0x3d>;
}
// Indexed loads and stores.
@ -611,24 +595,17 @@ let Predicates = [IsFP64bit, HasStdEnc] in {
// Patterns for loads/stores with a reg+imm operand.
let AddedComplexity = 40 in {
let Predicates = [IsN64, HasStdEnc] in {
def : LoadRegImmPat<LWC1_P8, f32, load>;
def : StoreRegImmPat<SWC1_P8, f32>;
def : LoadRegImmPat<LDC164_P8, f64, load>;
def : StoreRegImmPat<SDC164_P8, f64>;
}
let Predicates = [NotN64, HasStdEnc] in {
let Predicates = [HasStdEnc] in {
def : LoadRegImmPat<LWC1, f32, load>;
def : StoreRegImmPat<SWC1, f32>;
}
let Predicates = [NotN64, HasMips64, HasStdEnc] in {
let Predicates = [HasMips64, HasStdEnc] in {
def : LoadRegImmPat<LDC164, f64, load>;
def : StoreRegImmPat<SDC164, f64>;
}
let Predicates = [NotN64, NotMips64, HasStdEnc] in {
let Predicates = [NotMips64, HasStdEnc] in {
def : LoadRegImmPat<PseudoLDC1, f64, load>;
def : StoreRegImmPat<PseudoSDC1, f64>;
}

View File

@ -278,34 +278,23 @@ def MipsMemAsmOperand : AsmOperandClass {
}
// Address operand
def mem : Operand<i32> {
def mem : Operand<iPTR> {
let PrintMethod = "printMemOperand";
let MIOperandInfo = (ops GPR32, simm16);
let MIOperandInfo = (ops ptr_rc, simm16);
let EncoderMethod = "getMemEncoding";
let ParserMatchClass = MipsMemAsmOperand;
let OperandType = "OPERAND_MEMORY";
}
def mem64 : Operand<i64> {
let PrintMethod = "printMemOperand";
let MIOperandInfo = (ops GPR64, simm16_64);
let EncoderMethod = "getMemEncoding";
let ParserMatchClass = MipsMemAsmOperand;
let OperandType = "OPERAND_MEMORY";
}
def mem_ea : Operand<i32> {
def mem_ea : Operand<iPTR> {
let PrintMethod = "printMemOperandEA";
let MIOperandInfo = (ops GPR32, simm16);
let MIOperandInfo = (ops ptr_rc, simm16);
let EncoderMethod = "getMemEncoding";
let OperandType = "OPERAND_MEMORY";
}
def mem_ea_64 : Operand<i64> {
let PrintMethod = "printMemOperandEA";
let MIOperandInfo = (ops GPR64, simm16_64);
let EncoderMethod = "getMemEncoding";
let OperandType = "OPERAND_MEMORY";
def PtrRC : Operand<iPTR> {
let MIOperandInfo = (ops ptr_rc);
}
// size operand of ext instruction
@ -457,91 +446,39 @@ class FMem<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
}
// Memory Load/Store
class Load<string opstr, SDPatternOperator OpNode, DAGOperand RO,
InstrItinClass Itin, Operand MemOpnd, ComplexPattern Addr,
string ofsuffix> :
InstSE<(outs RO:$rt), (ins MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(set RO:$rt, (OpNode Addr:$addr))], NoItinerary, FrmI,
!strconcat(opstr, ofsuffix)> {
class Load<string opstr, DAGOperand RO, SDPatternOperator OpNode = null_frag,
InstrItinClass Itin = NoItinerary, ComplexPattern Addr = addr> :
InstSE<(outs RO:$rt), (ins mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(set RO:$rt, (OpNode Addr:$addr))], NoItinerary, FrmI, opstr> {
let DecoderMethod = "DecodeMem";
let canFoldAsLoad = 1;
let mayLoad = 1;
}
class Store<string opstr, SDPatternOperator OpNode, DAGOperand RO,
InstrItinClass Itin, Operand MemOpnd, ComplexPattern Addr,
string ofsuffix> :
InstSE<(outs), (ins RO:$rt, MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(OpNode RO:$rt, Addr:$addr)], NoItinerary, FrmI,
!strconcat(opstr, ofsuffix)> {
class Store<string opstr, DAGOperand RO, SDPatternOperator OpNode = null_frag,
InstrItinClass Itin = NoItinerary, ComplexPattern Addr = addr> :
InstSE<(outs), (ins RO:$rt, mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(OpNode RO:$rt, Addr:$addr)], NoItinerary, FrmI, opstr> {
let DecoderMethod = "DecodeMem";
let mayStore = 1;
}
multiclass LoadM<string opstr, DAGOperand RO,
SDPatternOperator OpNode = null_frag,
InstrItinClass Itin = NoItinerary,
ComplexPattern Addr = addr> {
def NAME : Load<opstr, OpNode, RO, Itin, mem, Addr, "">,
Requires<[NotN64, HasStdEnc]>;
def _P8 : Load<opstr, OpNode, RO, Itin, mem64, Addr, "_p8">,
Requires<[IsN64, HasStdEnc]> {
let DecoderNamespace = "Mips64";
let isCodeGenOnly = 1;
}
}
multiclass StoreM<string opstr, DAGOperand RO,
SDPatternOperator OpNode = null_frag,
InstrItinClass Itin = NoItinerary,
ComplexPattern Addr = addr> {
def NAME : Store<opstr, OpNode, RO, Itin, mem, Addr, "">,
Requires<[NotN64, HasStdEnc]>;
def _P8 : Store<opstr, OpNode, RO, Itin, mem64, Addr, "_p8">,
Requires<[IsN64, HasStdEnc]> {
let DecoderNamespace = "Mips64";
let isCodeGenOnly = 1;
}
}
// Load/Store Left/Right
let canFoldAsLoad = 1 in
class LoadLeftRight<string opstr, SDNode OpNode, RegisterOperand RO,
Operand MemOpnd> :
InstSE<(outs RO:$rt), (ins MemOpnd:$addr, RO:$src),
class LoadLeftRight<string opstr, SDNode OpNode, RegisterOperand RO> :
InstSE<(outs RO:$rt), (ins mem:$addr, RO:$src),
!strconcat(opstr, "\t$rt, $addr"),
[(set RO:$rt, (OpNode addr:$addr, RO:$src))], NoItinerary, FrmI> {
let DecoderMethod = "DecodeMem";
string Constraints = "$src = $rt";
}
class StoreLeftRight<string opstr, SDNode OpNode, RegisterOperand RO,
Operand MemOpnd>:
InstSE<(outs), (ins RO:$rt, MemOpnd:$addr), !strconcat(opstr, "\t$rt, $addr"),
class StoreLeftRight<string opstr, SDNode OpNode, RegisterOperand RO> :
InstSE<(outs), (ins RO:$rt, mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(OpNode RO:$rt, addr:$addr)], NoItinerary, FrmI> {
let DecoderMethod = "DecodeMem";
}
multiclass LoadLeftRightM<string opstr, SDNode OpNode, RegisterOperand RO> {
def NAME : LoadLeftRight<opstr, OpNode, RO, mem>,
Requires<[NotN64, HasStdEnc, NotInMicroMips]>;
def _P8 : LoadLeftRight<opstr, OpNode, RO, mem64>,
Requires<[IsN64, HasStdEnc]> {
let DecoderNamespace = "Mips64";
let isCodeGenOnly = 1;
}
}
multiclass StoreLeftRightM<string opstr, SDNode OpNode, RegisterOperand RO> {
def NAME : StoreLeftRight<opstr, OpNode, RO, mem>,
Requires<[NotN64, HasStdEnc, NotInMicroMips]>;
def _P8 : StoreLeftRight<opstr, OpNode, RO, mem64>,
Requires<[IsN64, HasStdEnc]> {
let DecoderNamespace = "Mips64";
let isCodeGenOnly = 1;
}
}
// Conditional Branch
class CBranch<string opstr, PatFrag cond_op, RegisterOperand RO> :
InstSE<(outs), (ins RO:$rs, RO:$rt, brtarget:$offset),
@ -749,8 +686,8 @@ class MoveToLOHI<string opstr, RegisterOperand RO, list<Register> DefRegs>:
let neverHasSideEffects = 1;
}
class EffectiveAddress<string opstr, RegisterOperand RO, Operand Mem> :
InstSE<(outs RO:$rt), (ins Mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
class EffectiveAddress<string opstr, RegisterOperand RO> :
InstSE<(outs RO:$rt), (ins mem_ea:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(set RO:$rt, addr:$addr)], NoItinerary, FrmI> {
let isCodeGenOnly = 1;
let DecoderMethod = "DecodeMem";
@ -807,36 +744,24 @@ class InsBase<string opstr, RegisterOperand RO>:
}
// Atomic instructions with 2 source operands (ATOMIC_SWAP & ATOMIC_LOAD_*).
class Atomic2Ops<PatFrag Op, RegisterClass DRC, RegisterClass PRC> :
PseudoSE<(outs DRC:$dst), (ins PRC:$ptr, DRC:$incr),
[(set DRC:$dst, (Op PRC:$ptr, DRC:$incr))]>;
multiclass Atomic2Ops32<PatFrag Op> {
def NAME : Atomic2Ops<Op, GPR32, GPR32>, Requires<[NotN64, HasStdEnc]>;
def _P8 : Atomic2Ops<Op, GPR32, GPR64>, Requires<[IsN64, HasStdEnc]>;
}
class Atomic2Ops<PatFrag Op, RegisterClass DRC> :
PseudoSE<(outs DRC:$dst), (ins PtrRC:$ptr, DRC:$incr),
[(set DRC:$dst, (Op iPTR:$ptr, DRC:$incr))]>;
// Atomic Compare & Swap.
class AtomicCmpSwap<PatFrag Op, RegisterClass DRC, RegisterClass PRC> :
PseudoSE<(outs DRC:$dst), (ins PRC:$ptr, DRC:$cmp, DRC:$swap),
[(set DRC:$dst, (Op PRC:$ptr, DRC:$cmp, DRC:$swap))]>;
class AtomicCmpSwap<PatFrag Op, RegisterClass DRC> :
PseudoSE<(outs DRC:$dst), (ins PtrRC:$ptr, DRC:$cmp, DRC:$swap),
[(set DRC:$dst, (Op iPTR:$ptr, DRC:$cmp, DRC:$swap))]>;
multiclass AtomicCmpSwap32<PatFrag Op> {
def NAME : AtomicCmpSwap<Op, GPR32, GPR32>,
Requires<[NotN64, HasStdEnc]>;
def _P8 : AtomicCmpSwap<Op, GPR32, GPR64>,
Requires<[IsN64, HasStdEnc]>;
}
class LLBase<string opstr, RegisterOperand RO, Operand Mem> :
InstSE<(outs RO:$rt), (ins Mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
class LLBase<string opstr, RegisterOperand RO> :
InstSE<(outs RO:$rt), (ins mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
[], NoItinerary, FrmI> {
let DecoderMethod = "DecodeMem";
let mayLoad = 1;
}
class SCBase<string opstr, RegisterOperand RO, Operand Mem> :
InstSE<(outs RO:$dst), (ins RO:$rt, Mem:$addr),
class SCBase<string opstr, RegisterOperand RO> :
InstSE<(outs RO:$dst), (ins RO:$rt, mem:$addr),
!strconcat(opstr, "\t$rt, $addr"), [], NoItinerary, FrmI> {
let DecoderMethod = "DecodeMem";
let mayStore = 1;
@ -867,38 +792,38 @@ def ADJCALLSTACKUP : MipsPseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
}
let usesCustomInserter = 1 in {
defm ATOMIC_LOAD_ADD_I8 : Atomic2Ops32<atomic_load_add_8>;
defm ATOMIC_LOAD_ADD_I16 : Atomic2Ops32<atomic_load_add_16>;
defm ATOMIC_LOAD_ADD_I32 : Atomic2Ops32<atomic_load_add_32>;
defm ATOMIC_LOAD_SUB_I8 : Atomic2Ops32<atomic_load_sub_8>;
defm ATOMIC_LOAD_SUB_I16 : Atomic2Ops32<atomic_load_sub_16>;
defm ATOMIC_LOAD_SUB_I32 : Atomic2Ops32<atomic_load_sub_32>;
defm ATOMIC_LOAD_AND_I8 : Atomic2Ops32<atomic_load_and_8>;
defm ATOMIC_LOAD_AND_I16 : Atomic2Ops32<atomic_load_and_16>;
defm ATOMIC_LOAD_AND_I32 : Atomic2Ops32<atomic_load_and_32>;
defm ATOMIC_LOAD_OR_I8 : Atomic2Ops32<atomic_load_or_8>;
defm ATOMIC_LOAD_OR_I16 : Atomic2Ops32<atomic_load_or_16>;
defm ATOMIC_LOAD_OR_I32 : Atomic2Ops32<atomic_load_or_32>;
defm ATOMIC_LOAD_XOR_I8 : Atomic2Ops32<atomic_load_xor_8>;
defm ATOMIC_LOAD_XOR_I16 : Atomic2Ops32<atomic_load_xor_16>;
defm ATOMIC_LOAD_XOR_I32 : Atomic2Ops32<atomic_load_xor_32>;
defm ATOMIC_LOAD_NAND_I8 : Atomic2Ops32<atomic_load_nand_8>;
defm ATOMIC_LOAD_NAND_I16 : Atomic2Ops32<atomic_load_nand_16>;
defm ATOMIC_LOAD_NAND_I32 : Atomic2Ops32<atomic_load_nand_32>;
def ATOMIC_LOAD_ADD_I8 : Atomic2Ops<atomic_load_add_8, GPR32>;
def ATOMIC_LOAD_ADD_I16 : Atomic2Ops<atomic_load_add_16, GPR32>;
def ATOMIC_LOAD_ADD_I32 : Atomic2Ops<atomic_load_add_32, GPR32>;
def ATOMIC_LOAD_SUB_I8 : Atomic2Ops<atomic_load_sub_8, GPR32>;
def ATOMIC_LOAD_SUB_I16 : Atomic2Ops<atomic_load_sub_16, GPR32>;
def ATOMIC_LOAD_SUB_I32 : Atomic2Ops<atomic_load_sub_32, GPR32>;
def ATOMIC_LOAD_AND_I8 : Atomic2Ops<atomic_load_and_8, GPR32>;
def ATOMIC_LOAD_AND_I16 : Atomic2Ops<atomic_load_and_16, GPR32>;
def ATOMIC_LOAD_AND_I32 : Atomic2Ops<atomic_load_and_32, GPR32>;
def ATOMIC_LOAD_OR_I8 : Atomic2Ops<atomic_load_or_8, GPR32>;
def ATOMIC_LOAD_OR_I16 : Atomic2Ops<atomic_load_or_16, GPR32>;
def ATOMIC_LOAD_OR_I32 : Atomic2Ops<atomic_load_or_32, GPR32>;
def ATOMIC_LOAD_XOR_I8 : Atomic2Ops<atomic_load_xor_8, GPR32>;
def ATOMIC_LOAD_XOR_I16 : Atomic2Ops<atomic_load_xor_16, GPR32>;
def ATOMIC_LOAD_XOR_I32 : Atomic2Ops<atomic_load_xor_32, GPR32>;
def ATOMIC_LOAD_NAND_I8 : Atomic2Ops<atomic_load_nand_8, GPR32>;
def ATOMIC_LOAD_NAND_I16 : Atomic2Ops<atomic_load_nand_16, GPR32>;
def ATOMIC_LOAD_NAND_I32 : Atomic2Ops<atomic_load_nand_32, GPR32>;
defm ATOMIC_SWAP_I8 : Atomic2Ops32<atomic_swap_8>;
defm ATOMIC_SWAP_I16 : Atomic2Ops32<atomic_swap_16>;
defm ATOMIC_SWAP_I32 : Atomic2Ops32<atomic_swap_32>;
def ATOMIC_SWAP_I8 : Atomic2Ops<atomic_swap_8, GPR32>;
def ATOMIC_SWAP_I16 : Atomic2Ops<atomic_swap_16, GPR32>;
def ATOMIC_SWAP_I32 : Atomic2Ops<atomic_swap_32, GPR32>;
defm ATOMIC_CMP_SWAP_I8 : AtomicCmpSwap32<atomic_cmp_swap_8>;
defm ATOMIC_CMP_SWAP_I16 : AtomicCmpSwap32<atomic_cmp_swap_16>;
defm ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap32<atomic_cmp_swap_32>;
def ATOMIC_CMP_SWAP_I8 : AtomicCmpSwap<atomic_cmp_swap_8, GPR32>;
def ATOMIC_CMP_SWAP_I16 : AtomicCmpSwap<atomic_cmp_swap_16, GPR32>;
def ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap<atomic_cmp_swap_32, GPR32>;
}
/// Pseudo instructions for loading and storing accumulator registers.
let isPseudo = 1, isCodeGenOnly = 1 in {
defm LOAD_ACC64 : LoadM<"", ACC64>;
defm STORE_ACC64 : StoreM<"", ACC64>;
def LOAD_ACC64 : Load<"", ACC64>;
def STORE_ACC64 : Store<"", ACC64>;
}
//===----------------------------------------------------------------------===//
@ -969,23 +894,23 @@ let Predicates = [HasMips32r2, HasStdEnc] in {
/// Load and Store Instructions
/// aligned
defm LB : LoadM<"lb", GPR32Opnd, sextloadi8, IILoad>, MMRel, LW_FM<0x20>;
defm LBu : LoadM<"lbu", GPR32Opnd, zextloadi8, IILoad, addrDefault>, MMRel,
LW_FM<0x24>;
defm LH : LoadM<"lh", GPR32Opnd, sextloadi16, IILoad, addrDefault>, MMRel,
LW_FM<0x21>;
defm LHu : LoadM<"lhu", GPR32Opnd, zextloadi16, IILoad>, MMRel, LW_FM<0x25>;
defm LW : LoadM<"lw", GPR32Opnd, load, IILoad, addrDefault>, MMRel,
LW_FM<0x23>;
defm SB : StoreM<"sb", GPR32Opnd, truncstorei8, IIStore>, MMRel, LW_FM<0x28>;
defm SH : StoreM<"sh", GPR32Opnd, truncstorei16, IIStore>, MMRel, LW_FM<0x29>;
defm SW : StoreM<"sw", GPR32Opnd, store, IIStore>, MMRel, LW_FM<0x2b>;
def LB : Load<"lb", GPR32Opnd, sextloadi8, IILoad>, MMRel, LW_FM<0x20>;
def LBu : Load<"lbu", GPR32Opnd, zextloadi8, IILoad, addrDefault>, MMRel,
LW_FM<0x24>;
def LH : Load<"lh", GPR32Opnd, sextloadi16, IILoad, addrDefault>, MMRel,
LW_FM<0x21>;
def LHu : Load<"lhu", GPR32Opnd, zextloadi16, IILoad>, MMRel, LW_FM<0x25>;
def LW : Load<"lw", GPR32Opnd, load, IILoad, addrDefault>, MMRel,
LW_FM<0x23>;
def SB : Store<"sb", GPR32Opnd, truncstorei8, IIStore>, MMRel, LW_FM<0x28>;
def SH : Store<"sh", GPR32Opnd, truncstorei16, IIStore>, MMRel, LW_FM<0x29>;
def SW : Store<"sw", GPR32Opnd, store, IIStore>, MMRel, LW_FM<0x2b>;
/// load/store left/right
defm LWL : LoadLeftRightM<"lwl", MipsLWL, GPR32Opnd>, LW_FM<0x22>;
defm LWR : LoadLeftRightM<"lwr", MipsLWR, GPR32Opnd>, LW_FM<0x26>;
defm SWL : StoreLeftRightM<"swl", MipsSWL, GPR32Opnd>, LW_FM<0x2a>;
defm SWR : StoreLeftRightM<"swr", MipsSWR, GPR32Opnd>, LW_FM<0x2e>;
def LWL : LoadLeftRight<"lwl", MipsLWL, GPR32Opnd>, LW_FM<0x22>;
def LWR : LoadLeftRight<"lwr", MipsLWR, GPR32Opnd>, LW_FM<0x26>;
def SWL : StoreLeftRight<"swl", MipsSWL, GPR32Opnd>, LW_FM<0x2a>;
def SWR : StoreLeftRight<"swr", MipsSWR, GPR32Opnd>, LW_FM<0x2e>;
def SYNC : SYNC_FT, SYNC_FM;
def TEQ : TEQ_FT<"teq", GPR32Opnd>, TEQ_FM<0x34>;
@ -1002,15 +927,8 @@ def DI : DEI_FT<"di", GPR32Opnd>, EI_FM<0>;
def WAIT : WAIT_FT<"wait">;
/// Load-linked, Store-conditional
let Predicates = [NotN64, HasStdEnc] in {
def LL : LLBase<"ll", GPR32Opnd, mem>, LW_FM<0x30>;
def SC : SCBase<"sc", GPR32Opnd, mem>, LW_FM<0x38>;
}
let Predicates = [IsN64, HasStdEnc], DecoderNamespace = "Mips64" in {
def LL_P8 : LLBase<"ll", GPR32Opnd, mem64>, LW_FM<0x30>;
def SC_P8 : SCBase<"sc", GPR32Opnd, mem64>, LW_FM<0x38>;
}
def LL : LLBase<"ll", GPR32Opnd>, LW_FM<0x30>;
def SC : SCBase<"sc", GPR32Opnd>, LW_FM<0x38>;
/// Jump and Branch Instructions
def J : JumpFJ<jmptarget, "j", br, bb>, FJ<2>,
@ -1093,7 +1011,7 @@ def NOP : PseudoSE<(outs), (ins), []>, PseudoInstExpansion<(SLL ZERO, ZERO, 0)>;
// instructions. The same not happens for stack address copies, so an
// add op with mem ComplexPattern is used and the stack address copy
// can be matched. It's similar to Sparc LEA_ADDRi
def LEA_ADDiu : EffectiveAddress<"addiu", GPR32Opnd, mem_ea>, LW_FM<9>;
def LEA_ADDiu : EffectiveAddress<"addiu", GPR32Opnd>, LW_FM<9>;
// MADD*/MSUB*
def MADD : MArithR<"madd", 1>, MULT_FM<0x1c, 0>;
@ -1291,24 +1209,15 @@ def : MipsPat<(not GPR32:$in),
(NOR GPR32Opnd:$in, ZERO)>;
// extended loads
let Predicates = [NotN64, HasStdEnc] in {
let Predicates = [HasStdEnc] in {
def : MipsPat<(i32 (extloadi1 addr:$src)), (LBu addr:$src)>;
def : MipsPat<(i32 (extloadi8 addr:$src)), (LBu addr:$src)>;
def : MipsPat<(i32 (extloadi16 addr:$src)), (LHu addr:$src)>;
}
let Predicates = [IsN64, HasStdEnc] in {
def : MipsPat<(i32 (extloadi1 addr:$src)), (LBu_P8 addr:$src)>;
def : MipsPat<(i32 (extloadi8 addr:$src)), (LBu_P8 addr:$src)>;
def : MipsPat<(i32 (extloadi16 addr:$src)), (LHu_P8 addr:$src)>;
}
// peepholes
let Predicates = [NotN64, HasStdEnc] in {
def : MipsPat<(store (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
}
let Predicates = [IsN64, HasStdEnc] in {
def : MipsPat<(store (i32 0), addr:$dst), (SW_P8 ZERO, addr:$dst)>;
}
let Predicates = [HasStdEnc] in
def : MipsPat<(store (i32 0), addr:$dst), (SW ZERO, addr:$dst)>;
// brcond patterns
multiclass BrcondPats<RegisterClass RC, Instruction BEQOp, Instruction BNEOp,
@ -1405,16 +1314,11 @@ def : MipsPat<(i32 (ExtractLOHI ACC64:$ac, imm:$lohi_idx)),
// Load halfword/word patterns.
let AddedComplexity = 40 in {
let Predicates = [NotN64, HasStdEnc] in {
let Predicates = [HasStdEnc] in {
def : LoadRegImmPat<LBu, i32, zextloadi8>;
def : LoadRegImmPat<LH, i32, sextloadi16>;
def : LoadRegImmPat<LW, i32, load>;
}
let Predicates = [IsN64, HasStdEnc] in {
def : LoadRegImmPat<LBu_P8, i32, zextloadi8>;
def : LoadRegImmPat<LH_P8, i32, sextloadi16>;
def : LoadRegImmPat<LW_P8, i32, load>;
}
}
//===----------------------------------------------------------------------===//

View File

@ -47,6 +47,11 @@ MipsRegisterInfo::MipsRegisterInfo(const MipsSubtarget &ST)
unsigned MipsRegisterInfo::getPICCallReg() { return Mips::T9; }
const TargetRegisterClass *
MipsRegisterInfo::getPointerRegClass(const MachineFunction &MF,
unsigned Kind) const {
return Subtarget.isABI_N64() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
}
unsigned
MipsRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,

View File

@ -42,6 +42,9 @@ public:
void adjustMipsStackFrame(MachineFunction &MF) const;
/// Code Generation virtual methods...
const TargetRegisterClass *getPointerRegClass(const MachineFunction &MF,
unsigned Kind) const;
unsigned getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const;
const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
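With the override declared above, Mips code that used to branch on IsN64 to choose between 32-bit and 64-bit address registers can simply query the pointer register class, as the later MipsSEInstrInfo.cpp hunks show for opcode selection. The helper below is hypothetical (createPointerVReg is not part of the patch) and only sketches that usage pattern.

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

// Hypothetical helper, for illustration only: create a virtual register of the
// ABI-appropriate pointer class instead of hard-coding GPR32 or GPR64.
static unsigned createPointerVReg(MachineRegisterInfo &MRI,
                                  const TargetRegisterInfo &TRI,
                                  const MachineFunction &MF) {
  const TargetRegisterClass *PtrRC = TRI.getPointerRegClass(MF);
  return MRI.createVirtualRegister(PtrRC); // GPR32 vreg on O32/N32, GPR64 on N64
}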

View File

@ -70,31 +70,23 @@ bool ExpandPseudo::expand() {
bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) {
switch(I->getOpcode()) {
case Mips::LOAD_CCOND_DSP:
case Mips::LOAD_CCOND_DSP_P8:
expandLoadCCond(MBB, I);
break;
case Mips::STORE_CCOND_DSP:
case Mips::STORE_CCOND_DSP_P8:
expandStoreCCond(MBB, I);
break;
case Mips::LOAD_ACC64:
case Mips::LOAD_ACC64_P8:
case Mips::LOAD_ACC64DSP:
case Mips::LOAD_ACC64DSP_P8:
expandLoadACC(MBB, I, 4);
break;
case Mips::LOAD_ACC128:
case Mips::LOAD_ACC128_P8:
expandLoadACC(MBB, I, 8);
break;
case Mips::STORE_ACC64:
case Mips::STORE_ACC64_P8:
case Mips::STORE_ACC64DSP:
case Mips::STORE_ACC64DSP_P8:
expandStoreACC(MBB, I, 4);
break;
case Mips::STORE_ACC128:
case Mips::STORE_ACC128_P8:
expandStoreACC(MBB, I, 8);
break;
case TargetOpcode::COPY:

View File

@ -49,10 +49,8 @@ isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const
{
unsigned Opc = MI->getOpcode();
if ((Opc == Mips::LW) || (Opc == Mips::LW_P8) || (Opc == Mips::LD) ||
(Opc == Mips::LD_P8) || (Opc == Mips::LWC1) || (Opc == Mips::LWC1_P8) ||
(Opc == Mips::LDC1) || (Opc == Mips::LDC164) ||
(Opc == Mips::LDC164_P8)) {
if ((Opc == Mips::LW) || (Opc == Mips::LD) ||
(Opc == Mips::LWC1) || (Opc == Mips::LDC1) || (Opc == Mips::LDC164)) {
if ((MI->getOperand(1).isFI()) && // is a stack slot
(MI->getOperand(2).isImm()) && // the imm is zero
(isZeroImm(MI->getOperand(2)))) {
@ -74,10 +72,8 @@ isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const
{
unsigned Opc = MI->getOpcode();
if ((Opc == Mips::SW) || (Opc == Mips::SW_P8) || (Opc == Mips::SD) ||
(Opc == Mips::SD_P8) || (Opc == Mips::SWC1) || (Opc == Mips::SWC1_P8) ||
(Opc == Mips::SDC1) || (Opc == Mips::SDC164) ||
(Opc == Mips::SDC164_P8)) {
if ((Opc == Mips::SW) || (Opc == Mips::SD) ||
(Opc == Mips::SWC1) || (Opc == Mips::SDC1) || (Opc == Mips::SDC164)) {
if ((MI->getOperand(1).isFI()) && // is a stack slot
(MI->getOperand(2).isImm()) && // the imm is zero
(isZeroImm(MI->getOperand(2)))) {
@ -186,23 +182,23 @@ storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned Opc = 0;
if (Mips::GPR32RegClass.hasSubClassEq(RC))
Opc = IsN64 ? Mips::SW_P8 : Mips::SW;
Opc = Mips::SW;
else if (Mips::GPR64RegClass.hasSubClassEq(RC))
Opc = IsN64 ? Mips::SD_P8 : Mips::SD;
Opc = Mips::SD;
else if (Mips::ACC64RegClass.hasSubClassEq(RC))
Opc = IsN64 ? Mips::STORE_ACC64_P8 : Mips::STORE_ACC64;
Opc = Mips::STORE_ACC64;
else if (Mips::ACC64DSPRegClass.hasSubClassEq(RC))
Opc = IsN64 ? Mips::STORE_ACC64DSP_P8 : Mips::STORE_ACC64DSP;
Opc = Mips::STORE_ACC64DSP;
else if (Mips::ACC128RegClass.hasSubClassEq(RC))
Opc = IsN64 ? Mips::STORE_ACC128_P8 : Mips::STORE_ACC128;
Opc = Mips::STORE_ACC128;
else if (Mips::DSPCCRegClass.hasSubClassEq(RC))
Opc = IsN64 ? Mips::STORE_CCOND_DSP_P8 : Mips::STORE_CCOND_DSP;
Opc = Mips::STORE_CCOND_DSP;
else if (Mips::FGR32RegClass.hasSubClassEq(RC))
Opc = IsN64 ? Mips::SWC1_P8 : Mips::SWC1;
Opc = Mips::SWC1;
else if (Mips::AFGR64RegClass.hasSubClassEq(RC))
Opc = Mips::SDC1;
else if (Mips::FGR64RegClass.hasSubClassEq(RC))
Opc = IsN64 ? Mips::SDC164_P8 : Mips::SDC164;
Opc = Mips::SDC164;
assert(Opc && "Register class not handled!");
BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill))
@ -219,23 +215,23 @@ loadRegFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned Opc = 0;
if (Mips::GPR32RegClass.hasSubClassEq(RC))
Opc = IsN64 ? Mips::LW_P8 : Mips::LW;
Opc = Mips::LW;
else if (Mips::GPR64RegClass.hasSubClassEq(RC))
Opc = IsN64 ? Mips::LD_P8 : Mips::LD;
Opc = Mips::LD;
else if (Mips::ACC64RegClass.hasSubClassEq(RC))
Opc = IsN64 ? Mips::LOAD_ACC64_P8 : Mips::LOAD_ACC64;
Opc = Mips::LOAD_ACC64;
else if (Mips::ACC64DSPRegClass.hasSubClassEq(RC))
Opc = IsN64 ? Mips::LOAD_ACC64DSP_P8 : Mips::LOAD_ACC64DSP;
Opc = Mips::LOAD_ACC64DSP;
else if (Mips::ACC128RegClass.hasSubClassEq(RC))
Opc = IsN64 ? Mips::LOAD_ACC128_P8 : Mips::LOAD_ACC128;
Opc = Mips::LOAD_ACC128;
else if (Mips::DSPCCRegClass.hasSubClassEq(RC))
Opc = IsN64 ? Mips::LOAD_CCOND_DSP_P8 : Mips::LOAD_CCOND_DSP;
Opc = Mips::LOAD_CCOND_DSP;
else if (Mips::FGR32RegClass.hasSubClassEq(RC))
Opc = IsN64 ? Mips::LWC1_P8 : Mips::LWC1;
Opc = Mips::LWC1;
else if (Mips::AFGR64RegClass.hasSubClassEq(RC))
Opc = Mips::LDC1;
else if (Mips::FGR64RegClass.hasSubClassEq(RC))
Opc = IsN64 ? Mips::LDC164_P8 : Mips::LDC164;
Opc = Mips::LDC164;
assert(Opc && "Register class not handled!");
BuildMI(MBB, I, DL, get(Opc), DestReg).addFrameIndex(FI).addImm(Offset)