Mirror of https://github.com/c64scene-ar/llvm-6502.git
Fix the mnemonics for the mov instructions so that the source and destination appear in the correct order! Arg!

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@11530 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 7ddc3fbd22
commit e87331d11d
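
After this rename the opcode suffix names the destination first and the source second, matching the operand order of the mov instruction itself: MOVrm* are loads (register = [memory]) and MOVmr* are stores ([memory] = register). As a quick orientation before the diff, here is a minimal C++ sketch of the two directions built from the BuildMI/addFrameReference helpers used throughout the patch; BB, Reg, SrcReg and FrameIdx are placeholders for whatever basic block, virtual registers and stack slot the surrounding selector code would supply, so treat it as an illustration of the naming convention rather than code lifted from the tree.

    // Load: Reg = [FrameIdx]    ("rm" = register <- memory)
    addFrameReference(BuildMI(BB, X86::MOVrm32, 4, Reg), FrameIdx);

    // Store: [FrameIdx] = SrcReg    ("mr" = memory <- register)
    addFrameReference(BuildMI(BB, X86::MOVmr32, 5), FrameIdx).addReg(SrcReg);
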
@@ -490,20 +490,20 @@ void ISel::LoadArgumentsToVirtualRegs(Function &Fn) {
     switch (getClassB(I->getType())) {
     case cByte:
       FI = MFI->CreateFixedObject(1, ArgOffset);
-      addFrameReference(BuildMI(BB, X86::MOVmr8, 4, Reg), FI);
+      addFrameReference(BuildMI(BB, X86::MOVrm8, 4, Reg), FI);
       break;
     case cShort:
       FI = MFI->CreateFixedObject(2, ArgOffset);
-      addFrameReference(BuildMI(BB, X86::MOVmr16, 4, Reg), FI);
+      addFrameReference(BuildMI(BB, X86::MOVrm16, 4, Reg), FI);
       break;
     case cInt:
       FI = MFI->CreateFixedObject(4, ArgOffset);
-      addFrameReference(BuildMI(BB, X86::MOVmr32, 4, Reg), FI);
+      addFrameReference(BuildMI(BB, X86::MOVrm32, 4, Reg), FI);
       break;
     case cLong:
       FI = MFI->CreateFixedObject(8, ArgOffset);
-      addFrameReference(BuildMI(BB, X86::MOVmr32, 4, Reg), FI);
-      addFrameReference(BuildMI(BB, X86::MOVmr32, 4, Reg+1), FI, 4);
+      addFrameReference(BuildMI(BB, X86::MOVrm32, 4, Reg), FI);
+      addFrameReference(BuildMI(BB, X86::MOVrm32, 4, Reg+1), FI, 4);
       ArgOffset += 4;   // longs require 4 additional bytes
       break;
     case cFP:
@@ -1052,18 +1052,18 @@ void ISel::doCall(const ValueRecord &Ret, MachineInstr *CallMI,
       // Promote arg to 32 bits wide into a temporary register...
       unsigned R = makeAnotherReg(Type::UIntTy);
       promote32(R, Args[i]);
-      addRegOffset(BuildMI(BB, X86::MOVrm32, 5),
+      addRegOffset(BuildMI(BB, X86::MOVmr32, 5),
                    X86::ESP, ArgOffset).addReg(R);
       break;
     }
     case cInt:
-      addRegOffset(BuildMI(BB, X86::MOVrm32, 5),
+      addRegOffset(BuildMI(BB, X86::MOVmr32, 5),
                    X86::ESP, ArgOffset).addReg(ArgReg);
       break;
     case cLong:
-      addRegOffset(BuildMI(BB, X86::MOVrm32, 5),
+      addRegOffset(BuildMI(BB, X86::MOVmr32, 5),
                    X86::ESP, ArgOffset).addReg(ArgReg);
-      addRegOffset(BuildMI(BB, X86::MOVrm32, 5),
+      addRegOffset(BuildMI(BB, X86::MOVmr32, 5),
                    X86::ESP, ArgOffset+4).addReg(ArgReg+1);
       ArgOffset += 4;        // 8 byte entry, not 4.
       break;
@@ -1203,7 +1203,7 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
     if (cast<Constant>(CI.getOperand(1))->isNullValue()) {
       if (ID == Intrinsic::returnaddress) {
         // Just load the return address
-        addFrameReference(BuildMI(BB, X86::MOVmr32, 4, TmpReg1),
+        addFrameReference(BuildMI(BB, X86::MOVrm32, 4, TmpReg1),
                           ReturnAddressIndex);
       } else {
         addFrameReference(BuildMI(BB, X86::LEAr32, 4, TmpReg1),
@@ -1835,13 +1835,13 @@ void ISel::visitLoadInst(LoadInst &I) {
   unsigned Class = getClassB(I.getType());

   if (Class == cLong) {
-    addDirectMem(BuildMI(BB, X86::MOVmr32, 4, DestReg), SrcAddrReg);
-    addRegOffset(BuildMI(BB, X86::MOVmr32, 4, DestReg+1), SrcAddrReg, 4);
+    addDirectMem(BuildMI(BB, X86::MOVrm32, 4, DestReg), SrcAddrReg);
+    addRegOffset(BuildMI(BB, X86::MOVrm32, 4, DestReg+1), SrcAddrReg, 4);
     return;
   }

   static const unsigned Opcodes[] = {
-    X86::MOVmr8, X86::MOVmr16, X86::MOVmr32, X86::FLDr32
+    X86::MOVrm8, X86::MOVrm16, X86::MOVrm32, X86::FLDr32
   };
   unsigned Opcode = Opcodes[Class];
   if (I.getType() == Type::DoubleTy) Opcode = X86::FLDr64;
@@ -1859,13 +1859,13 @@ void ISel::visitStoreInst(StoreInst &I) {
   unsigned Class = getClassB(ValTy);

   if (Class == cLong) {
-    addDirectMem(BuildMI(BB, X86::MOVrm32, 1+4), AddressReg).addReg(ValReg);
-    addRegOffset(BuildMI(BB, X86::MOVrm32, 1+4), AddressReg,4).addReg(ValReg+1);
+    addDirectMem(BuildMI(BB, X86::MOVmr32, 1+4), AddressReg).addReg(ValReg);
+    addRegOffset(BuildMI(BB, X86::MOVmr32, 1+4), AddressReg,4).addReg(ValReg+1);
     return;
   }

   static const unsigned Opcodes[] = {
-    X86::MOVrm8, X86::MOVrm16, X86::MOVrm32, X86::FSTr32
+    X86::MOVmr8, X86::MOVmr16, X86::MOVmr32, X86::FSTr32
   };
   unsigned Opcode = Opcodes[Class];
   if (ValTy == Type::DoubleTy) Opcode = X86::FSTr64;
@@ -2066,11 +2066,11 @@ void ISel::emitCastOperation(MachineBasicBlock *BB,
       F->getFrameInfo()->CreateStackObject(SrcTy, TM.getTargetData());

     if (SrcClass == cLong) {
-      addFrameReference(BMI(BB, IP, X86::MOVrm32, 5), FrameIdx).addReg(SrcReg);
-      addFrameReference(BMI(BB, IP, X86::MOVrm32, 5),
+      addFrameReference(BMI(BB, IP, X86::MOVmr32, 5), FrameIdx).addReg(SrcReg);
+      addFrameReference(BMI(BB, IP, X86::MOVmr32, 5),
                         FrameIdx, 4).addReg(SrcReg+1);
     } else {
-      static const unsigned Op1[] = { X86::MOVrm8, X86::MOVrm16, X86::MOVrm32 };
+      static const unsigned Op1[] = { X86::MOVmr8, X86::MOVmr16, X86::MOVmr32 };
       addFrameReference(BMI(BB, IP, Op1[SrcClass], 5), FrameIdx).addReg(SrcReg);
     }

@@ -2090,7 +2090,7 @@ void ISel::emitCastOperation(MachineBasicBlock *BB,

     // Load the old value of the high byte of the control word...
     unsigned HighPartOfCW = makeAnotherReg(Type::UByteTy);
-    addFrameReference(BMI(BB, IP, X86::MOVmr8, 4, HighPartOfCW), CWFrameIdx, 1);
+    addFrameReference(BMI(BB, IP, X86::MOVrm8, 4, HighPartOfCW), CWFrameIdx, 1);

     // Set the high part to be round to zero...
     addFrameReference(BMI(BB, IP, X86::MOVmi8, 5), CWFrameIdx, 1).addZImm(12);
@@ -2099,7 +2099,7 @@ void ISel::emitCastOperation(MachineBasicBlock *BB,
     addFrameReference(BMI(BB, IP, X86::FLDCWm16, 4), CWFrameIdx);

     // Restore the memory image of control word to original value
-    addFrameReference(BMI(BB, IP, X86::MOVrm8, 5),
+    addFrameReference(BMI(BB, IP, X86::MOVmr8, 5),
                       CWFrameIdx, 1).addReg(HighPartOfCW);

     // We don't have the facilities for directly storing byte sized data to
@@ -2128,10 +2128,10 @@ void ISel::emitCastOperation(MachineBasicBlock *BB,
     addFrameReference(BMI(BB, IP, Op1[StoreClass], 5), FrameIdx).addReg(SrcReg);

     if (DestClass == cLong) {
-      addFrameReference(BMI(BB, IP, X86::MOVmr32, 4, DestReg), FrameIdx);
-      addFrameReference(BMI(BB, IP, X86::MOVmr32, 4, DestReg+1), FrameIdx, 4);
+      addFrameReference(BMI(BB, IP, X86::MOVrm32, 4, DestReg), FrameIdx);
+      addFrameReference(BMI(BB, IP, X86::MOVrm32, 4, DestReg+1), FrameIdx, 4);
     } else {
-      static const unsigned Op2[] = { X86::MOVmr8, X86::MOVmr16, X86::MOVmr32 };
+      static const unsigned Op2[] = { X86::MOVrm8, X86::MOVrm16, X86::MOVrm32 };
       addFrameReference(BMI(BB, IP, Op2[DestClass], 4, DestReg), FrameIdx);
     }

@@ -2185,12 +2185,12 @@ void ISel::visitVAArgInst(VAArgInst &I) {
   case Type::PointerTyID:
   case Type::UIntTyID:
   case Type::IntTyID:
-    addDirectMem(BuildMI(BB, X86::MOVmr32, 4, DestReg), VAList);
+    addDirectMem(BuildMI(BB, X86::MOVrm32, 4, DestReg), VAList);
     break;
   case Type::ULongTyID:
   case Type::LongTyID:
-    addDirectMem(BuildMI(BB, X86::MOVmr32, 4, DestReg), VAList);
-    addRegOffset(BuildMI(BB, X86::MOVmr32, 4, DestReg+1), VAList, 4);
+    addDirectMem(BuildMI(BB, X86::MOVrm32, 4, DestReg), VAList);
+    addRegOffset(BuildMI(BB, X86::MOVrm32, 4, DestReg+1), VAList, 4);
     break;
   case Type::DoubleTyID:
     addDirectMem(BuildMI(BB, X86::FLDr64, 4, DestReg), VAList);
@@ -460,7 +460,7 @@ bool SSAPH::PeepholeOptimize(MachineBasicBlock &MBB,
   switch (MI->getOpcode()) {

     // Register to memory stores. Format: <base,scale,indexreg,immdisp>, srcreg
-  case X86::MOVrm32: case X86::MOVrm16: case X86::MOVrm8:
+  case X86::MOVmr32: case X86::MOVmr16: case X86::MOVmr8:
   case X86::MOVmi32: case X86::MOVmi16: case X86::MOVmi8:
     // Check to see if we can fold the source instruction into this one...
     if (MachineInstr *SrcInst = getDefiningInst(MI->getOperand(4))) {
@@ -478,9 +478,9 @@ bool SSAPH::PeepholeOptimize(MachineBasicBlock &MBB,
         return true;
     break;

-  case X86::MOVmr32:
-  case X86::MOVmr16:
-  case X86::MOVmr8:
+  case X86::MOVrm32:
+  case X86::MOVrm16:
+  case X86::MOVrm8:
     // If we can optimize the addressing expression, do so now.
     if (OptimizeAddress(MI, 1))
       return true;
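
The "Format: <base,scale,indexreg,immdisp>, srcreg" comment in the hunk above is also why the folding code inspects MI->getOperand(4): in this backend a memory reference is spelled out as four explicit machine operands, and for a register-to-memory store the value being written follows them as operand 4. A hand-built sketch of that layout, assuming the usual MachineInstrBuilder addReg/addZImm/addSImm calls (the concrete registers and displacement are invented for the example):

    // mov DWORD PTR [EBX + 8*ECX + 20], EAX
    BuildMI(BB, X86::MOVmr32, 5)
        .addReg(X86::EBX)   // operand 0: base register
        .addZImm(8)         // operand 1: scale
        .addReg(X86::ECX)   // operand 2: index register
        .addSImm(20)        // operand 3: immediate displacement
        .addReg(X86::EAX);  // operand 4: the register being stored
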
@@ -202,15 +202,15 @@ def MOVmi8 : X86Inst<"mov", 0xC6, MRMS0m , Arg8>; // [mem] = imm
 def MOVmi16 : X86Inst<"mov", 0xC7, MRMS0m , Arg16>, OpSize;   // [mem] = imm16
 def MOVmi32 : X86Inst<"mov", 0xC7, MRMS0m , Arg32>;           // [mem] = imm32

-def MOVmr8  : X86Inst<"mov", 0x8A, MRMSrcMem , Arg8>;           // R8  = [mem]
-def MOVmr16 : X86Inst<"mov", 0x8B, MRMSrcMem , Arg16>, OpSize,  // R16 = [mem]
+def MOVrm8  : X86Inst<"mov", 0x8A, MRMSrcMem , Arg8>;           // R8  = [mem]
+def MOVrm16 : X86Inst<"mov", 0x8B, MRMSrcMem , Arg16>, OpSize,  // R16 = [mem]
               Pattern<(set R16, (load (plus R32, (plus (times imm, R32), imm))))>;
-def MOVmr32 : X86Inst<"mov", 0x8B, MRMSrcMem , Arg32>,          // R32 = [mem]
+def MOVrm32 : X86Inst<"mov", 0x8B, MRMSrcMem , Arg32>,          // R32 = [mem]
               Pattern<(set R32, (load (plus R32, (plus (times imm, R32), imm))))>;

-def MOVrm8  : X86Inst<"mov", 0x88, MRMDestMem, Arg8>;           // [mem] = R8
-def MOVrm16 : X86Inst<"mov", 0x89, MRMDestMem, Arg16>, OpSize;  // [mem] = R16
-def MOVrm32 : X86Inst<"mov", 0x89, MRMDestMem, Arg32>;          // [mem] = R32
+def MOVmr8  : X86Inst<"mov", 0x88, MRMDestMem, Arg8>;           // [mem] = R8
+def MOVmr16 : X86Inst<"mov", 0x89, MRMDestMem, Arg16>, OpSize;  // [mem] = R16
+def MOVmr32 : X86Inst<"mov", 0x89, MRMDestMem, Arg32>;          // [mem] = R32

 //===----------------------------------------------------------------------===//
 // Fixed-Register Multiplication and Division Instructions...
@@ -555,17 +555,17 @@ def RET_R32 : Expander<(ret R32:$reg),
 // FIXME: This should eventually just be implemented by defining a frameidx as a
 // value address for a load.
 def LOAD_FI16 : Expander<(set R16:$dest, (load frameidx:$fi)),
-                         [(MOVmr16 R16:$dest, frameidx:$fi, 1, 0/*NoReg*/, 0)]>;
+                         [(MOVrm16 R16:$dest, frameidx:$fi, 1, 0/*NoReg*/, 0)]>;

 def LOAD_FI32 : Expander<(set R32:$dest, (load frameidx:$fi)),
-                         [(MOVmr32 R32:$dest, frameidx:$fi, 1, 0/*NoReg*/, 0)]>;
+                         [(MOVrm32 R32:$dest, frameidx:$fi, 1, 0/*NoReg*/, 0)]>;


 def LOAD_R16 : Expander<(set R16:$dest, (load R32:$src)),
-                        [(MOVmr16 R16:$dest, R32:$src, 1, 0/*NoReg*/, 0)]>;
+                        [(MOVrm16 R16:$dest, R32:$src, 1, 0/*NoReg*/, 0)]>;

 def LOAD_R32 : Expander<(set R32:$dest, (load R32:$src)),
-                        [(MOVmr32 R32:$dest, R32:$src, 1, 0/*NoReg*/, 0)]>;
+                        [(MOVrm32 R32:$dest, R32:$src, 1, 0/*NoReg*/, 0)]>;

 def BR_EQ : Expander<(brcond (seteq R32:$a1, R32:$a2),
                              basicblock:$d1, basicblock:$d2),
@@ -51,7 +51,7 @@ int X86RegisterInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                          unsigned SrcReg, int FrameIdx,
                                          const TargetRegisterClass *RC) const {
   static const unsigned Opcode[] =
-    { X86::MOVrm8, X86::MOVrm16, X86::MOVrm32, X86::FSTPr80 };
+    { X86::MOVmr8, X86::MOVmr16, X86::MOVmr32, X86::FSTPr80 };
   MachineInstr *I = addFrameReference(BuildMI(Opcode[getIdx(RC)], 5),
                                       FrameIdx).addReg(SrcReg);
   MBB.insert(MI, I);
@@ -63,7 +63,7 @@ int X86RegisterInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                           unsigned DestReg, int FrameIdx,
                                           const TargetRegisterClass *RC) const{
   static const unsigned Opcode[] =
-    { X86::MOVmr8, X86::MOVmr16, X86::MOVmr32, X86::FLDr80 };
+    { X86::MOVrm8, X86::MOVrm16, X86::MOVrm32, X86::FLDr80 };
   unsigned OC = Opcode[getIdx(RC)];
   MBB.insert(MI, addFrameReference(BuildMI(OC, 4, DestReg), FrameIdx));
   return 1;
@@ -112,9 +112,9 @@ bool X86RegisterInfo::foldMemoryOperand(MachineBasicBlock::iterator &MI,
   MachineInstr* NI = 0;
   if (i == 0) {
     switch(MI->getOpcode()) {
-    case X86::MOVrr8:  NI = MakeMRInst(X86::MOVrm8 , FrameIndex, MI); break;
-    case X86::MOVrr16: NI = MakeMRInst(X86::MOVrm16, FrameIndex, MI); break;
-    case X86::MOVrr32: NI = MakeMRInst(X86::MOVrm32, FrameIndex, MI); break;
+    case X86::MOVrr8:  NI = MakeMRInst(X86::MOVmr8 , FrameIndex, MI); break;
+    case X86::MOVrr16: NI = MakeMRInst(X86::MOVmr16, FrameIndex, MI); break;
+    case X86::MOVrr32: NI = MakeMRInst(X86::MOVmr32, FrameIndex, MI); break;
     case X86::ADDrr8:  NI = MakeMRInst(X86::ADDmr8 , FrameIndex, MI); break;
     case X86::ADDrr16: NI = MakeMRInst(X86::ADDmr16, FrameIndex, MI); break;
     case X86::ADDrr32: NI = MakeMRInst(X86::ADDmr32, FrameIndex, MI); break;
@@ -131,9 +131,9 @@ bool X86RegisterInfo::foldMemoryOperand(MachineBasicBlock::iterator &MI,
     }
   } else if (i == 1) {
     switch(MI->getOpcode()) {
-    case X86::MOVrr8:  NI = MakeRMInst(X86::MOVmr8 , FrameIndex, MI); break;
-    case X86::MOVrr16: NI = MakeRMInst(X86::MOVmr16, FrameIndex, MI); break;
-    case X86::MOVrr32: NI = MakeRMInst(X86::MOVmr32, FrameIndex, MI); break;
+    case X86::MOVrr8:  NI = MakeRMInst(X86::MOVrm8 , FrameIndex, MI); break;
+    case X86::MOVrr16: NI = MakeRMInst(X86::MOVrm16, FrameIndex, MI); break;
+    case X86::MOVrr32: NI = MakeRMInst(X86::MOVrm32, FrameIndex, MI); break;
     case X86::ADDrr8:  NI = MakeRMInst(X86::ADDrm8 , FrameIndex, MI); break;
     case X86::ADDrr16: NI = MakeRMInst(X86::ADDrm16, FrameIndex, MI); break;
     case X86::ADDrr32: NI = MakeRMInst(X86::ADDrm32, FrameIndex, MI); break;
@@ -254,7 +254,7 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
   }

   // Save EBP into the appropriate stack slot...
-  MI = addRegOffset(BuildMI(X86::MOVrm32, 5),    // mov [ESP-<offset>], EBP
+  MI = addRegOffset(BuildMI(X86::MOVmr32, 5),    // mov [ESP-<offset>], EBP
                     X86::ESP, EBPOffset+NumBytes).addReg(X86::EBP);
   MBB.insert(MBBI, MI);